repo_id (string, 12–110 chars) | file_path (string, 24–164 chars) | content (string, 3–89.3M chars) | __index_level_0__ (int64)
---|---|---|---|
public_repos | public_repos/torchmetrics/MANIFEST.in | # Manifest syntax https://packaging.python.org/en/latest/guides/using-manifest-in/
graft wheelhouse
recursive-exclude __pycache__ *.py[cod] *.orig
# include also models
recursive-include src *.pth
# Include the README and CHANGELOG
include *.md
recursive-include src *.md
# Include the license file
include LICENSE
# Include Citation file
include *.cff
# Include marker file for PEP 561
recursive-include src *.typed
exclude *.sh
exclude *.toml
exclude *.svg
# exclude tests from package
recursive-exclude tests *
recursive-exclude site *
exclude tests
# Exclude the documentation files
recursive-exclude docs *
exclude docs
# Include the Requirements
include requirements.txt
recursive-include requirements *.txt
recursive-exclude requirements *.py
# Exclude build configs
exclude *.yml
exclude *.yaml
exclude Makefile
prune .devcontainer
prune .git
prune .github
prune examples*
prune temp*
prune test*
prune SandBox*
| 0 |
public_repos | public_repos/torchmetrics/CITATION.cff | cff-version: 1.2.0
message: "If you want to cite the framework, feel free to use this (but only if you loved it 😊)"
title: "TorchMetrics - Measuring Reproducibility in PyTorch"
abstract:
"A main problem with reproducing machine learning publications is the variance of metric implementations across papers.
A lack of standardization leads to different behavior in mechanisms such as checkpointing, learning rate schedulers or early stopping, that will influence the reported results.
For example, a complex metric such as Fréchet inception distance (FID) for synthetic image quality evaluation will differ based on the specific interpolation method used.
There have been a few attempts at tackling the reproducibility issues.
Papers With Code links research code with its corresponding paper. Similarly, arXiv recently added a code and data section that links both official and community code to papers.
However, these methods rely on the paper code to be made publicly accessible which is not always possible.
Our approach is to provide the de-facto reference implementation for metrics.
This approach enables proprietary work to still be comparable as long as they’ve used our reference implementations.
We introduce TorchMetrics, a general-purpose metrics package that covers a wide variety of tasks and domains used in the machine learning community.
TorchMetrics provides standard classification and regression metrics; and domain-specific metrics for audio, computer vision, natural language processing, and information retrieval.
Our process for adding a new metric is as follows, first we integrate a well-tested and established third-party library.
Once we’ve verified the implementations and written tests for them, we re-implement them in native PyTorch to enable hardware acceleration and remove any bottlenecks in inter-device transfer."
authors:
- name: Nicki Skafte Detlefsen
orcid: "https://orcid.org/0000-0002-8133-682X"
- name: Jiri Borovec
orcid: "https://orcid.org/0000-0001-7437-824X"
- name: Justus Schock
orcid: "https://orcid.org/0000-0003-0512-3053"
- name: Ananya Harsh
- name: Teddy Koker
- name: Luca Di Liello
- name: Daniel Stancl
- name: Changsheng Quan
- name: Maxim Grechkin
- name: William Falcon
doi: 10.21105/joss.04101
license: "Apache-2.0"
url: "https://www.pytorchlightning.ai"
repository-code: "https://github.com/Lightning-AI/torchmetrics"
date-released: 2022-02-11
keywords:
- machine learning
- deep learning
- artificial intelligence
- metrics
- pytorch
| 0 |
public_repos | public_repos/torchmetrics/setup.py | #!/usr/bin/env python
import glob
import os
import re
from functools import partial
from importlib.util import module_from_spec, spec_from_file_location
from itertools import chain
from pathlib import Path
from typing import Any, Iterable, Iterator, List, Optional, Tuple, Union
from pkg_resources import Requirement, yield_lines
from setuptools import find_packages, setup
_PATH_ROOT = os.path.realpath(os.path.dirname(__file__))
_PATH_SOURCE = os.path.join(_PATH_ROOT, "src")
_PATH_REQUIRE = os.path.join(_PATH_ROOT, "requirements")
_FREEZE_REQUIREMENTS = os.environ.get("FREEZE_REQUIREMENTS", "0").lower() in ("1", "true")
class _RequirementWithComment(Requirement):
strict_string = "# strict"
def __init__(self, *args: Any, comment: str = "", pip_argument: Optional[str] = None, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.comment = comment
if pip_argument is not None and not pip_argument:
raise ValueError("Expected `pip_argument` to either be `None` or an str, but got an empty string")
self.pip_argument = pip_argument
self.strict = self.strict_string in comment.lower()
def adjust(self, unfreeze: bool) -> str:
"""Remove version restrictions unless they are strict.
>>> _RequirementWithComment("arrow<=1.2.2,>=1.2.0", comment="# anything").adjust(False)
'arrow<=1.2.2,>=1.2.0'
>>> _RequirementWithComment("arrow<=1.2.2,>=1.2.0", comment="# strict").adjust(False)
'arrow<=1.2.2,>=1.2.0 # strict'
>>> _RequirementWithComment("arrow<=1.2.2,>=1.2.0", comment="# my name").adjust(True)
'arrow>=1.2.0'
>>> _RequirementWithComment("arrow>=1.2.0, <=1.2.2", comment="# strict").adjust(True)
'arrow<=1.2.2,>=1.2.0 # strict'
>>> _RequirementWithComment("arrow").adjust(True)
'arrow'
"""
out = str(self)
if self.strict:
return f"{out} {self.strict_string}"
if unfreeze:
for operator, version in self.specs:
if operator in ("<", "<="):
# drop upper bound
return out.replace(f"{operator}{version},", "")
return out
def _parse_requirements(strs: Union[str, Iterable[str]]) -> Iterator[_RequirementWithComment]:
r"""Adapted from `pkg_resources.parse_requirements` to include comments.
>>> txt = ['# ignored', '', 'this # is an', '--piparg', 'example', 'foo # strict', 'thing', '-r different/file.txt']
>>> [r.adjust('none') for r in _parse_requirements(txt)]
['this', 'example', 'foo # strict', 'thing']
>>> txt = '\\n'.join(txt)
>>> [r.adjust('none') for r in _parse_requirements(txt)]
['this', 'example', 'foo # strict', 'thing']
"""
lines = yield_lines(strs)
pip_argument = None
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if " #" in line:
comment_pos = line.find(" #")
line, comment = line[:comment_pos], line[comment_pos:]
else:
comment = ""
# If there is a line continuation, drop it, and append the next line.
if line.endswith("\\"):
line = line[:-2].strip()
try:
line += next(lines)
except StopIteration:
return
if "@" in line or re.search("https?://", line):
# skip lines with links like `pesq @ git+https://github.com/ludlows/python-pesq`
continue
# If there's a pip argument, save it
if line.startswith("--"):
pip_argument = line
continue
if line.startswith("-r "):
# linked requirement files are unsupported
continue
yield _RequirementWithComment(line, comment=comment, pip_argument=pip_argument)
pip_argument = None
def _load_requirements(
path_dir: str, file_name: str = "base.txt", unfreeze: bool = not _FREEZE_REQUIREMENTS
) -> List[str]:
"""Load requirements from a file.
>>> _load_requirements(_PATH_REQUIRE)
['numpy...', 'torch...']
"""
path = Path(path_dir) / file_name
if not path.exists():
raise ValueError("Path {path} not found for input dir {path_dir} and filename {file_name}.")
text = path.read_text()
return [req.adjust(unfreeze) for req in _parse_requirements(text)]
def _load_readme_description(path_dir: str, homepage: str, version: str) -> str:
"""Load readme as decribtion.
>>> _load_readme_description(_PATH_ROOT, "", "")
'<div align="center">...'
"""
path_readme = os.path.join(path_dir, "README.md")
with open(path_readme, encoding="utf-8") as fp:
text = fp.read()
# https://github.com/Lightning-AI/torchmetrics/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png
github_source_url = os.path.join(homepage, "raw", version)
# replace relative repository path to absolute link to the release
# do not replace all "docs" as in the readme we replace some other sources with particular path to docs
text = text.replace("docs/source/_static/", f"{os.path.join(github_source_url, 'docs/source/_static/')}")
# readthedocs badge
text = text.replace("badge/?version=stable", f"badge/?version={version}")
text = text.replace("torchmetrics.readthedocs.io/en/stable/", f"torchmetrics.readthedocs.io/en/{version}")
# codecov badge
text = text.replace("/branch/master/graph/badge.svg", f"/release/{version}/graph/badge.svg")
# replace github badges for release ones
text = text.replace("badge.svg?branch=master&event=push", f"badge.svg?tag={version}")
# Azure...
text = text.replace("?branchName=master", f"?branchName=refs%2Ftags%2F{version}")
text = re.sub(r"\?definitionId=\d+&branchName=master", f"?definitionId=2&branchName=refs%2Ftags%2F{version}", text)
skip_begin = r"<!-- following section will be skipped from PyPI description -->"
skip_end = r"<!-- end skipping PyPI description -->"
# todo: wrap content as commented description
return re.sub(rf"{skip_begin}.+?{skip_end}", "<!-- -->", text, flags=re.IGNORECASE + re.DOTALL)
def _load_py_module(fname: str, pkg: str = "torchmetrics"):
spec = spec_from_file_location(os.path.join(pkg, fname), os.path.join(_PATH_SOURCE, pkg, fname))
py = module_from_spec(spec)
spec.loader.exec_module(py)
return py
ABOUT = _load_py_module("__about__.py")
LONG_DESCRIPTION = _load_readme_description(
_PATH_ROOT,
homepage=ABOUT.__homepage__,
version=f"v{ABOUT.__version__}",
)
BASE_REQUIREMENTS = _load_requirements(path_dir=_PATH_REQUIRE, file_name="base.txt")
def _prepare_extras(skip_pattern: str = "^_", skip_files: Tuple[str] = ("base.txt",)) -> dict:
"""Preparing extras for the package listing requirements.
Args:
skip_pattern: ignore files with this pattern, by default all files starting with _
skip_files: ignore some additional files, by default base requirements
Note: domain-specific test requirements are aggregated into a single "_tests" extra (which is not directly accessible).
"""
# find all extra requirements
_load_req = partial(_load_requirements, path_dir=_PATH_REQUIRE)
found_req_files = sorted(os.path.basename(p) for p in glob.glob(os.path.join(_PATH_REQUIRE, "*.txt")))
# filter unwanted files
found_req_files = [n for n in found_req_files if not re.match(skip_pattern, n)]
found_req_files = [n for n in found_req_files if n not in skip_files]
found_req_names = [os.path.splitext(req)[0] for req in found_req_files]
# define basic and extra extras
extras_req = {"_tests": []}
for name, fname in zip(found_req_names, found_req_files):
if name.endswith("_test"):
extras_req["_tests"] += _load_req(file_name=fname)
else:
extras_req[name] = _load_req(file_name=fname)
# filter the uniques
extras_req = {n: list(set(req)) for n, req in extras_req.items()}
# create an 'all' keyword that installs all possible dependencies
extras_req["all"] = list(chain.from_iterable(pkgs for k, pkgs in extras_req.items() if k not in ("_test", "_tests")))
extras_req["dev"] = extras_req["all"] + extras_req["_tests"]
return extras_req
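# For illustration only (hypothetical requirement file names): given requirements/audio.txt,
# requirements/image.txt and requirements/image_test.txt, the returned mapping would look
# roughly like
#   {"audio": [...], "image": [...], "_tests": [...], "all": [...], "dev": [...]}
# where "all" aggregates every public extra and "dev" additionally pulls in the test extras.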
# https://packaging.python.org/discussions/install-requires-vs-requirements/
# keep the metadata here for simplicity when reading this file... it's not obvious
# what happens otherwise, and non-engineers won't know to look in __init__ ...
# the goal of the project is simplicity for researchers; we don't want to add too many
# engineering-specific practices
if __name__ == "__main__":
setup(
name="torchmetrics",
version=ABOUT.__version__,
description=ABOUT.__docs__,
author=ABOUT.__author__,
author_email=ABOUT.__author_email__,
url=ABOUT.__homepage__,
download_url=os.path.join(ABOUT.__homepage__, "archive", "master.zip"),
license=ABOUT.__license__,
packages=find_packages(where="src"),
package_dir={"": "src"},
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
include_package_data=True,
zip_safe=False,
keywords=["deep learning", "machine learning", "pytorch", "metrics", "AI"],
python_requires=">=3.8",
setup_requires=[],
install_requires=BASE_REQUIREMENTS,
extras_require=_prepare_extras(),
project_urls={
"Bug Tracker": os.path.join(ABOUT.__homepage__, "issues"),
"Documentation": "https://torchmetrics.rtfd.io/en/latest/",
"Source Code": ABOUT.__homepage__,
},
classifiers=[
"Environment :: Console",
"Natural Language :: English",
# How mature is this project? Common values are
# 3 - Alpha, 4 - Beta, 5 - Production/Stable
"Development Status :: 5 - Production/Stable",
# Indicate who your project is intended for
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Scientific/Engineering :: Information Analysis",
# Pick your license as you wish
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
],
)
| 0 |
public_repos | public_repos/torchmetrics/.readthedocs.yml | # Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Build documentation in the docs/ directory with Sphinx
# reference: https://docs.readthedocs.io/en/stable/config-file/v2.html#sphinx
sphinx:
fail_on_warning: true
build:
os: "ubuntu-22.04"
tools:
python: "3.9"
commands:
- printenv
- pwd ; ls -lh
- pip install -U pip awscli --user
- python -m awscli s3 sync --no-sign-request s3://sphinx-packages/ dist/ ; ls -lh dist/
- >
pip install -e . -q -r requirements/_docs.txt \
-f 'https://download.pytorch.org/whl/cpu/torch_stable.html' -f dist/ ;
pip list
# this needs to be split so that `sphinx-build` is picked up from the previous installation
- bash docs/rtfd-build.sh
- mkdir -p _readthedocs ; mv docs/build/html _readthedocs/html
| 0 |
public_repos | public_repos/torchmetrics/requirements.txt | -r requirements/base.txt
| 0 |
public_repos | public_repos/torchmetrics/.codecov.yml | # see https://docs.codecov.io/docs/codecov-yaml
# Validation check:
# $ curl --data-binary @.codecov.yml https://codecov.io/validate
# https://docs.codecov.io/docs/codecovyml-reference
codecov:
bot: "codecov-io"
strict_yaml_branch: "yaml-config"
require_ci_to_pass: yes
notify:
# after_n_builds: 2
wait_for_ci: yes
coverage:
precision: 0 # 2 = xx.xx%, 0 = xx%
round: nearest # how coverage is rounded: down/up/nearest
range: 40...100 # custom range of coverage colors from red -> yellow -> green
status:
# https://codecov.readme.io/v1.0/docs/commit-status
project:
default:
informational: true
target: 95% # specify the target coverage for each commit status
threshold: 30% # allow this much decrease on the project
# https://github.com/codecov/support/wiki/Filtering-Branches
# branches: master
if_ci_failed: error
# https://github.com/codecov/support/wiki/Patch-Status
patch:
default:
informational: true
threshold: 50% # allow this much decrease on patch
changes: false
# https://docs.codecov.com/docs/github-checks#disabling-github-checks-patch-annotations
github_checks:
annotations: false
parsers:
gcov:
branch_detection:
conditional: true
loop: true
macro: false
method: false
javascript:
enable_partials: false
comment:
layout: header, diff
require_changes: false
behavior: default # update if exists else create new
# branches: *
| 0 |
public_repos | public_repos/torchmetrics/Makefile | .PHONY: test clean docs env data
export FREEZE_REQUIREMENTS=1
# assumes you have installed the needed packages
export SPHINX_MOCK_REQUIREMENTS=1
export SPHINX_FETCH_ASSETS=0
clean:
# clean all temp runs
rm -rf $(shell find . -name "mlruns")
rm -rf _ckpt_*
rm -rf .mypy_cache
rm -rf .pytest_cache
rm -rf tests/.pytest_cache
rm -rf ./docs/build
rm -rf ./docs/source/generated
rm -rf ./docs/source/*/generated
rm -rf ./docs/source/api
rm -rf build
rm -rf dist
rm -rf *.egg-info
rm -rf src/*.egg-info
test: clean env data
# run tests with coverage
cd src && python -m pytest torchmetrics
cd tests && python -m pytest unittests -v --cov=torchmetrics
cd tests && python -m coverage report
docs: clean
pip install -e . --quiet -r requirements/_docs.txt
# apt-get install -y texlive-latex-extra dvipng texlive-pictures texlive-fonts-recommended cm-super
TOKENIZERS_PARALLELISM=false python -m sphinx -b html -W --keep-going docs/source docs/build
env:
pip install -e . -U -r requirements/_devel.txt
data:
python -c "from urllib.request import urlretrieve ; urlretrieve('https://pl-public-data.s3.amazonaws.com/metrics/data.zip', 'data.zip')"
unzip -o data.zip -d ./tests
| 0 |
public_repos | public_repos/torchmetrics/.pre-commit-config.yaml | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
default_language_version:
python: python3
ci:
autofix_prs: true
autoupdate_commit_msg: "[pre-commit.ci] pre-commit suggestions"
autoupdate_schedule: quarterly
# submodules: true
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
# - id: check-json
- id: check-yaml
- id: check-toml
- id: check-docstring-first
- id: check-executables-have-shebangs
- id: check-case-conflict
- id: check-added-large-files
args: ["--maxkb=100", "--enforce-all"]
- id: detect-private-key
- repo: https://github.com/asottile/pyupgrade
rev: v3.14.0
hooks:
- id: pyupgrade
args: ["--py38-plus"]
name: Upgrade code
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
- id: codespell
additional_dependencies: [tomli]
args: ["--write-changes"]
exclude: pyproject.toml
- repo: https://github.com/crate-ci/typos
rev: v1.16.17
hooks:
- id: typos
# keep empty so that fixes are not written
args: []
exclude: pyproject.toml
- repo: https://github.com/PyCQA/docformatter
rev: v1.7.5
hooks:
- id: docformatter
additional_dependencies: [tomli]
args: ["--in-place"]
- repo: https://github.com/psf/black
rev: 23.9.1
hooks:
- id: black
name: Format code
- repo: https://github.com/executablebooks/mdformat
rev: 0.7.17
hooks:
- id: mdformat
additional_dependencies:
- mdformat-gfm
- mdformat-black
- mdformat_frontmatter
exclude: |
(?x)^(
CHANGELOG.md|
docs/paper_JOSS/paper.md
)$
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.0.3
hooks:
- id: prettier
# https://prettier.io/docs/en/options.html#print-width
args: ["--print-width=120"]
- repo: https://github.com/asottile/yesqa
rev: v1.5.0
hooks:
- id: yesqa
additional_dependencies:
- pep8-naming
- flake8-pytest-style
- flake8-bandit
- flake8-builtins
- flake8-bugbear
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.10.0
hooks:
# Enforce that noqa annotations always occur with specific codes. Sample annotations: # noqa: F401, # noqa: F401,W203
- id: python-check-blanket-noqa
# Enforce that # type: ignore annotations always occur with specific codes. Sample annotations: # type: ignore[attr-defined], # type: ignore[attr-defined, name-defined]
#- id: python-check-blanket-type-ignore # TODO
# Prevent common mistakes of assert mck.not_called(), assert mck.called_once_with(...) and mck.assert_called.
- id: python-check-mock-methods
# A quick check for the eval() built-in function
#- id: python-no-eval broken check - https://github.com/pre-commit/pygrep-hooks/issues/135
# A quick check for the deprecated .warn() method of python loggers
- id: python-no-log-warn
# Enforce that python3.6+ type annotations are used instead of type comments
#- id: python-use-type-annotations # false positive - https://github.com/pre-commit/pygrep-hooks/issues/154
# Detect common mistake of using single backticks when writing rst
#- id: rst-backticks # todo
# Detect mistake of rst directive not ending with double colon or space before the double colon
- id: rst-directive-colons
# Detect mistake of inline code touching normal text in rst
- id: rst-inline-touching-normal
# Forbid files which have a UTF-8 Unicode replacement character
- id: text-unicode-replacement-char
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.0.292
hooks:
- id: ruff
args: ["--fix"]
| 0 |
public_repos | public_repos/torchmetrics/pyproject.toml | [metadata]
license_file = "LICENSE"
description-file = "README.md"
[build-system]
requires = ["setuptools", "wheel"]
[tool.check-manifest]
ignore = ["*.yml", ".github", ".github/*"]
[tool.pytest.ini_options]
norecursedirs = [".git", ".github", "dist", "build", "docs"]
addopts = [
"--strict-markers",
"--doctest-modules",
"--doctest-plus",
"--color=yes",
"--disable-pytest-warnings",
]
#filterwarnings = ["error::FutureWarning"] # ToDo
xfail_strict = true
junit_duration_report = "call"
[tool.coverage.report]
exclude_lines = ["pragma: no cover", "pass"]
[tool.coverage.run]
parallel = true
concurrency = "thread"
relative_files = true
[tool.black]
# https://github.com/psf/black
line-length = 120
exclude = "(.eggs|.git|.hg|.mypy_cache|.venv|_build|buck-out|build|dist)"
[tool.docformatter]
recursive = true
# some docstrings start with r"""
wrap-summaries = 119
wrap-descriptions = 120
blank = true
[tool.codespell]
#skip = '*.py'
quiet-level = 3
# Todo: comma separated list of words; waiting for:
# https://github.com/codespell-project/codespell/issues/2839#issuecomment-1731601603
# Todo: also add links here until they are ignored by their nature:
# https://github.com/codespell-project/codespell/issues/2243#issuecomment-1732019960
ignore-words-list = """
rouge, \
mape, \
wil, \
fpr, \
raison, \
archiv
"""
[tool.typos.default]
extend-ignore-identifiers-re = [
# *sigh* this just isn't worth the cost of fixing
"AttributeID.*Supress.*",
]
[tool.typos.default.extend-identifiers]
# *sigh* this just isn't worth the cost of fixing
MAPE = "MAPE"
WIL = "WIL"
Raison = "Raison"
[tool.typos.default.extend-words]
# Don't correct the surname "Teh"
fpr = "fpr"
mape = "mape"
wil = "wil"
[tool.ruff]
target-version = "py38"
line-length = 120
# Enable Pyflakes `E` and `F` codes by default.
select = [
"E",
"W", # see: https://pypi.org/project/pycodestyle
"F", # see: https://pypi.org/project/pyflakes
"I", #see: https://pypi.org/project/isort/
"D", # see: https://pypi.org/project/pydocstyle
"N", # see: https://pypi.org/project/pep8-naming
"S", # see: https://pypi.org/project/flake8-bandit
]
extend-select = [
"A", # see: https://pypi.org/project/flake8-builtins
"B", # see: https://pypi.org/project/flake8-bugbear
"C4", # see: https://pypi.org/project/flake8-comprehensions
"PT", # see: https://pypi.org/project/flake8-pytest-style
"RET", # see: https://pypi.org/project/flake8-return
"SIM", # see: https://pypi.org/project/flake8-simplify
"YTT", # see: https://pypi.org/project/flake8-2020
"ANN", # see: https://pypi.org/project/flake8-annotations
"TID", # see: https://pypi.org/project/flake8-tidy-imports/
"T10", # see: https://pypi.org/project/flake8-debugger
"Q", # see: https://pypi.org/project/flake8-quotes
"RUF", # Ruff-specific rules
"EXE", # see: https://pypi.org/project/flake8-executable
"ISC", # see: https://pypi.org/project/flake8-implicit-str-concat
"PIE", # see: https://pypi.org/project/flake8-pie
"PLE", # see: https://pypi.org/project/pylint/
"PERF", # see: https://pypi.org/project/perflint/
"PYI", # see: https://pypi.org/project/flake8-pyi/
]
ignore = [
"E731", # Do not assign a lambda expression, use a def
"D100", # todo: Missing docstring in public module
"D104", # todo: Missing docstring in public package
"D107", # Missing docstring in `__init__`
"ANN101", # Missing type annotation for `self` in method
"S301", # todo: `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue # todo
"S310", # todo: Audit URL open for permitted schemes. Allowing use of `file:` or custom schemes is often unexpected. # todo
"B905", # todo: `zip()` without an explicit `strict=` parameter
]
# Exclude a variety of commonly ignored directories.
exclude = [
".eggs",
".git",
".mypy_cache",
".ruff_cache",
"__pypackages__",
"_build",
"build",
"dist",
"docs",
]
ignore-init-module-imports = true
unfixable = ["F401"]
[tool.ruff.per-file-ignores]
"setup.py" = ["ANN202", "ANN401"]
"src/**" = ["ANN401"]
"tests/**" = ["S101", "ANN001", "ANN201", "ANN202", "ANN401"]
[tool.ruff.pydocstyle]
# Use Google-style docstrings.
convention = "google"
#[tool.ruff.pycodestyle]
#ignore-overlong-task-comments = true
[tool.ruff.mccabe]
# Unlike Flake8, default to a complexity level of 10.
max-complexity = 10
[tool.mypy]
files = ["src/torchmetrics"]
install_types = "True"
non_interactive = "True"
disallow_untyped_defs = "True"
ignore_missing_imports = "True"
show_error_codes = "True"
warn_redundant_casts = "True"
warn_unused_configs = "True"
warn_unused_ignores = "True"
allow_redefinition = "True"
# disable this rule as the Trainer attributes are defined in the connectors, not in its __init__
disable_error_code = "attr-defined"
# style choices
warn_no_return = "False"
# Ignore mypy errors for these files
# TODO: the goal is for this to be empty
[[tool.mypy.overrides]]
module = [
"torchmetrics.classification.exact_match",
"torchmetrics.classification.f_beta",
"torchmetrics.classification.precision_recall",
"torchmetrics.classification.ranking",
"torchmetrics.classification.recall_at_fixed_precision",
"torchmetrics.classification.roc",
"torchmetrics.classification.stat_scores",
"torchmetrics.detection._mean_ap",
"torchmetrics.detection.mean_ap",
"torchmetrics.functional.image.psnr",
"torchmetrics.functional.image.ssim",
"torchmetrics.image.psnr",
"torchmetrics.image.ssim",
]
ignore_errors = "True"
| 0 |
public_repos | public_repos/torchmetrics/.prettierignore | # Ignore all MD files:
**/*.md
| 0 |
public_repos | public_repos/torchmetrics/README.md | <div align="center">
<img src="docs/source/_static/images/logo.png" width="400px">
**Machine learning metrics for distributed, scalable PyTorch applications.**
______________________________________________________________________
<p align="center">
<a href="#what-is-torchmetrics">What is Torchmetrics</a> •
<a href="#implementing-your-own-module-metric">Implementing a metric</a> •
<a href="#build-in-metrics">Built-in metrics</a> •
<a href="https://lightning.ai/docs/torchmetrics/stable/">Docs</a> •
<a href="#community">Community</a> •
<a href="#license">License</a>
</p>
______________________________________________________________________
[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/torchmetrics)](https://pypi.org/project/torchmetrics/)
[![PyPI Status](https://badge.fury.io/py/torchmetrics.svg)](https://badge.fury.io/py/torchmetrics)
[![PyPI - Downloads](https://img.shields.io/pypi/dm/torchmetrics)
](https://pepy.tech/project/torchmetrics)
[![Conda](https://img.shields.io/conda/v/conda-forge/torchmetrics?label=conda&color=success)](https://anaconda.org/conda-forge/torchmetrics)
[![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/Lightning-AI/torchmetrics/blob/master/LICENSE)
[![CI testing | CPU](https://github.com/Lightning-AI/torchmetrics/actions/workflows/ci-tests.yml/badge.svg?event=push)](https://github.com/Lightning-AI/torchmetrics/actions/workflows/ci-tests.yml)
[![Build Status](https://dev.azure.com/Lightning-AI/Metrics/_apis/build/status%2FTM.unittests?branchName=master)](https://dev.azure.com/Lightning-AI/Metrics/_build/latest?definitionId=54&branchName=master)
[![codecov](https://codecov.io/gh/Lightning-AI/torchmetrics/branch/master/graph/badge.svg?token=NER6LPI3HS)](https://codecov.io/gh/Lightning-AI/torchmetrics)
[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/Lightning-AI/torchmetrics/master.svg)](https://results.pre-commit.ci/latest/github/Lightning-AI/torchmetrics/master)
[![Documentation Status](https://readthedocs.org/projects/torchmetrics/badge/?version=latest)](https://torchmetrics.readthedocs.io/en/latest/?badge=latest)
[![Discord](https://img.shields.io/discord/1077906959069626439?style=plastic)](https://discord.gg/VptPCZkGNa)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5844769.svg)](https://doi.org/10.5281/zenodo.5844769)
[![JOSS status](https://joss.theoj.org/papers/561d9bb59b400158bc8204e2639dca43/status.svg)](https://joss.theoj.org/papers/561d9bb59b400158bc8204e2639dca43)
______________________________________________________________________
</div>
## Installation
Simple installation from PyPI
```bash
pip install torchmetrics
```
<details>
<summary>Other installations</summary>
Install using conda
```bash
conda install -c conda-forge torchmetrics
```
Pip from source
```bash
# with git
pip install git+https://github.com/Lightning-AI/torchmetrics.git@release/stable
```
Pip from archive
```bash
pip install https://github.com/Lightning-AI/torchmetrics/archive/refs/heads/release/stable.zip
```
Extra dependencies for specialized metrics:
```bash
pip install torchmetrics[audio]
pip install torchmetrics[image]
pip install torchmetrics[text]
pip install torchmetrics[all] # install all of the above
```
Install latest developer version
```bash
pip install https://github.com/Lightning-AI/torchmetrics/archive/master.zip
```
</details>
______________________________________________________________________
## What is TorchMetrics
TorchMetrics is a collection of 100+ PyTorch metrics implementations and an easy-to-use API to create custom metrics. It offers:
- A standardized interface to increase reproducibility
- Reduced boilerplate
- Automatic accumulation over batches
- Metrics optimized for distributed-training
- Automatic synchronization between multiple devices
You can use TorchMetrics with any PyTorch model or with [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/) to enjoy additional features such as:
- Module metrics are automatically placed on the correct device.
- Native support for logging metrics in Lightning to reduce even more boilerplate.
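For the Lightning case, here is a minimal sketch of what that integration typically looks like — the `LitClassifier` module, its linear model and the hyper-parameters are illustrative placeholders, and depending on your Lightning version the import may be `lightning.pytorch` instead of `pytorch_lightning`:
<!--phmdoctest-mark.skip-->
```python
import torch
import torchmetrics
import pytorch_lightning as pl


class LitClassifier(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.model = torch.nn.Linear(32, 5)  # placeholder model
        # registering the metric as a submodule lets Lightning move it to the right device
        self.train_acc = torchmetrics.classification.Accuracy(task="multiclass", num_classes=5)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self.model(x)
        loss = torch.nn.functional.cross_entropy(logits, y)
        # update the metric state and log the metric object itself;
        # Lightning then handles step- and epoch-level accumulation
        self.train_acc(logits, y)
        self.log("train_acc", self.train_acc, on_step=True, on_epoch=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=1e-2)
```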
## Using TorchMetrics
### Module metrics
The [module-based metrics](https://lightning.ai/docs/torchmetrics/stable/references/metric.html) contain internal metric states (similar to the parameters of the PyTorch module) that automate accumulation and synchronization across devices!
- Automatic accumulation over multiple batches
- Automatic synchronization between multiple devices
- Metric arithmetic (see the sketch after the single-GPU example below)
**This can be run on CPU, single GPU or multi-GPUs!**
For the single GPU/CPU case:
```python
import torch
# import our library
import torchmetrics
# initialize metric
metric = torchmetrics.classification.Accuracy(task="multiclass", num_classes=5)
# move the metric to device you want computations to take place
device = "cuda" if torch.cuda.is_available() else "cpu"
metric.to(device)
n_batches = 10
for i in range(n_batches):
# simulate a classification problem
preds = torch.randn(10, 5).softmax(dim=-1).to(device)
target = torch.randint(5, (10,)).to(device)
# metric on current batch
acc = metric(preds, target)
print(f"Accuracy on batch {i}: {acc}")
# metric on all batches using custom accumulation
acc = metric.compute()
print(f"Accuracy on all data: {acc}")
```
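The *metric arithmetic* bullet above also deserves a quick look. A minimal sketch, assuming that arithmetic on metric objects builds a compositional metric whose `update`/`compute` calls are forwarded to the metrics it was built from:
```python
import torch
from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision

acc = MulticlassAccuracy(num_classes=5)
prec = MulticlassPrecision(num_classes=5)

# arithmetic on metric objects yields a new metric that tracks both operands
mean_of_both = (acc + prec) / 2

preds = torch.randn(10, 5).softmax(dim=-1)
target = torch.randint(5, (10,))

# updating the composed metric updates the underlying accuracy and precision states
mean_of_both.update(preds, target)
print(mean_of_both.compute())
```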
Module metric usage remains the same when using multiple GPUs or multiple nodes.
<details>
<summary>Example using DDP</summary>
<!--phmdoctest-mark.skip-->
```python
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch import nn
from torch.nn.parallel import DistributedDataParallel as DDP
import torchmetrics
def metric_ddp(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
# create default process group
dist.init_process_group("gloo", rank=rank, world_size=world_size)
# initialize model
metric = torchmetrics.classification.Accuracy(task="multiclass", num_classes=5)
# define a model and append your metric to it
# this allows metric states to be placed on correct accelerators when
# .to(device) is called on the model
model = nn.Linear(10, 10)
model.metric = metric
model = model.to(rank)
# initialize DDP
model = DDP(model, device_ids=[rank])
n_epochs = 5
# this shows iteration over multiple training epochs
for n in range(n_epochs):
# this will be replaced by a DataLoader with a DistributedSampler
n_batches = 10
for i in range(n_batches):
# simulate a classification problem
preds = torch.randn(10, 5).softmax(dim=-1)
target = torch.randint(5, (10,))
# metric on current batch
acc = metric(preds, target)
if rank == 0: # print only for rank 0
print(f"Accuracy on batch {i}: {acc}")
# metric on all batches and all accelerators using custom accumulation
# accuracy is same across both accelerators
acc = metric.compute()
print(f"Accuracy on all data: {acc}, accelerator rank: {rank}")
# Resetting internal state such that metric ready for new data
metric.reset()
# cleanup
dist.destroy_process_group()
if __name__ == "__main__":
world_size = 2 # number of gpus to parallelize over
mp.spawn(metric_ddp, args=(world_size,), nprocs=world_size, join=True)
```
</details>
### Implementing your own Module metric
Implementing your own metric is as easy as subclassing a [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). Simply subclass `torchmetrics.Metric`
and implement the `update` and `compute` methods:
```python
import torch
from torchmetrics import Metric
class MyAccuracy(Metric):
def __init__(self):
# remember to call super
super().__init__()
# call `self.add_state` for every internal state that is needed for the metric's computations
# dist_reduce_fx indicates the function that should be used to reduce
# state from multiple processes
self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds: torch.Tensor, target: torch.Tensor) -> None:
# extract predicted class index for computing accuracy
preds = preds.argmax(dim=-1)
assert preds.shape == target.shape
# update metric states
self.correct += torch.sum(preds == target)
self.total += target.numel()
def compute(self) -> torch.Tensor:
# compute final result
return self.correct.float() / self.total
my_metric = MyAccuracy()
preds = torch.randn(10, 5).softmax(dim=-1)
target = torch.randint(5, (10,))
print(my_metric(preds, target))
```
### Functional metrics
Similar to [`torch.nn`](https://pytorch.org/docs/stable/nn.html), most metrics have both a [module-based](https://lightning.ai/docs/torchmetrics/stable/references/metric.html) and functional version.
The functional versions are simple Python functions that take [torch.tensors](https://pytorch.org/docs/stable/tensors.html) as input and return the corresponding metric as a [torch.tensor](https://pytorch.org/docs/stable/tensors.html).
```python
import torch
# import our library
import torchmetrics
# simulate a classification problem
preds = torch.randn(10, 5).softmax(dim=-1)
target = torch.randint(5, (10,))
acc = torchmetrics.functional.classification.multiclass_accuracy(
preds, target, num_classes=5
)
```
### Covered domains and example metrics
In total TorchMetrics contains [100+ metrics](https://lightning.ai/docs/torchmetrics/stable/all-metrics.html), which
cover the following domains:
- Audio
- Classification
- Detection
- Information Retrieval
- Image
- Multimodal (Image-Text)
- Nominal
- Regression
- Text
Each domain may require some additional dependencies which can be installed with `pip install torchmetrics[audio]`,
`pip install torchmetrics['image']` etc.
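As a quick taste of a non-classification domain, here is a minimal sketch using a text metric; word error rate should run on the base install, while heavier metrics in each domain are what the extras above pull dependencies in for (exact requirements vary per metric):
```python
from torchmetrics.text import WordErrorRate

preds = ["hello world", "the cat sat on the mat"]
target = ["hello beautiful world", "the cat sat on the mat"]

# lower is better: 0.0 means the predictions match the references exactly
wer = WordErrorRate()
print(wer(preds, target))
```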
### Additional features
#### Plotting
Visualization of metrics can be important to help understand what is going on with your machine learning algorithms.
TorchMetrics has built-in plotting support (install dependencies with `pip install torchmetrics[visual]`) for nearly
all modular metrics through the `.plot` method. Simply call the method to get a simple visualization of any metric!
```python
import torch
from torchmetrics.classification import MulticlassAccuracy, MulticlassConfusionMatrix
num_classes = 3
# this will generate two distributions that become more similar as iterations increase
w = torch.randn(num_classes)
target = lambda it: torch.multinomial((it * w).softmax(dim=-1), 100, replacement=True)
preds = lambda it: torch.multinomial((it * w).softmax(dim=-1), 100, replacement=True)
acc = MulticlassAccuracy(num_classes=num_classes, average="micro")
acc_per_class = MulticlassAccuracy(num_classes=num_classes, average=None)
confmat = MulticlassConfusionMatrix(num_classes=num_classes)
# plot single value
for i in range(5):
acc_per_class.update(preds(i), target(i))
confmat.update(preds(i), target(i))
fig1, ax1 = acc_per_class.plot()
fig2, ax2 = confmat.plot()
# plot multiple values
values = []
for i in range(10):
values.append(acc(preds(i), target(i)))
fig3, ax3 = acc.plot(values)
```
<p align="center">
<img src="docs/source/_static/images/plot_example.png" width="1000">
</p>
For examples of plotting different metrics try running [this example file](examples/plotting.py).
## Contribute!
The Lightning + TorchMetrics team is hard at work adding even more metrics.
But we're looking for incredible contributors like you to submit new metrics
and improve existing ones!
Join our [Slack](https://www.pytorchlightning.ai/community) to get help with becoming a contributor!
## Community
For help or questions, join our huge community on [Slack](https://www.pytorchlightning.ai/community)!
## Citation
We’re excited to continue the strong legacy of open source software and have been inspired
over the years by Caffe, Theano, Keras, PyTorch, torchbearer, ignite, sklearn and fast.ai.
If you want to cite this framework feel free to use GitHub's built-in citation option to generate a bibtex or APA-Style citation based on [this file](https://github.com/Lightning-AI/torchmetrics/blob/master/CITATION.cff) (but only if you loved it 😊).
## License
Please observe the Apache 2.0 license that is listed in this repository.
In addition, the Lightning framework is Patent Pending.
| 0 |
public_repos | public_repos/torchmetrics/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020-2022 Lightning-AI team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
public_repos | public_repos/torchmetrics/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
**Note: we move fast, but we still preserve backward compatibility one feature release (0.1 version) back.**
## [UnReleased] - 2023-MM-DD
### Added
- Added more tokenizers for `SacreBLEU` metric ([#2068](https://github.com/Lightning-AI/torchmetrics/pull/2068))
- Added `average` argument to multiclass versions of `PrecisionRecallCurve` and `ROC` ([#2084](https://github.com/Lightning-AI/torchmetrics/pull/2084))
- Added error if `NoTrainInceptionV3` is initialized without `torch-fidelity` being installed ([#2143](https://github.com/Lightning-AI/torchmetrics/pull/2143))
- Added support for Pytorch v2.1 ([#2142](https://github.com/Lightning-AI/torchmetrics/pull/2142))
- Added support for logging `MultiTaskWrapper` directly with lightnings `log_dict` method ([#2213](https://github.com/Lightning-AI/torchmetrics/pull/2213))
### Changed
- Change default state of `SpectralAngleMapper` and `UniversalImageQualityIndex` to be tensors ([#2089](https://github.com/Lightning-AI/torchmetrics/pull/2089))
- Changed minimum supported Pytorch version from 1.8 to 1.10 ([#2145](https://github.com/Lightning-AI/torchmetrics/pull/2145))
### Deprecated
- Deprecated `metric._update_called` ([#2141](https://github.com/Lightning-AI/torchmetrics/pull/2141))
- Changed x-/y-axis order for `PrecisionRecallCurve` to be consistent with scikit-learn ([#2183](https://github.com/Lightning-AI/torchmetrics/pull/2183))
### Removed
-
### Fixed
- Fixed numerical stability bug in `LearnedPerceptualImagePatchSimilarity` metric ([#2144](https://github.com/Lightning-AI/torchmetrics/pull/2144))
## [1.2.0] - 2023-09-22
### Added
- Added metric to cluster package:
- `MutualInformationScore` ([#2008](https://github.com/Lightning-AI/torchmetrics/pull/2008))
- `RandScore` ([#2025](https://github.com/Lightning-AI/torchmetrics/pull/2025))
- `NormalizedMutualInfoScore` ([#2029](https://github.com/Lightning-AI/torchmetrics/pull/2029))
- `AdjustedRandScore` ([#2032](https://github.com/Lightning-AI/torchmetrics/pull/2032))
- `CalinskiHarabaszScore` ([#2036](https://github.com/Lightning-AI/torchmetrics/pull/2036))
- `DunnIndex` ([#2049](https://github.com/Lightning-AI/torchmetrics/pull/2049))
- `HomogeneityScore` ([#2053](https://github.com/Lightning-AI/torchmetrics/pull/2053))
- `CompletenessScore` ([#2053](https://github.com/Lightning-AI/torchmetrics/pull/2053))
- `VMeasureScore` ([#2053](https://github.com/Lightning-AI/torchmetrics/pull/2053))
- `FowlkesMallowsIndex` ([#2066](https://github.com/Lightning-AI/torchmetrics/pull/2066))
- `AdjustedMutualInfoScore` ([#2058](https://github.com/Lightning-AI/torchmetrics/pull/2058))
- `DaviesBouldinScore` ([#2071](https://github.com/Lightning-AI/torchmetrics/pull/2071))
- Added `backend` argument to `MeanAveragePrecision` ([#2034](https://github.com/Lightning-AI/torchmetrics/pull/2034))
## [1.1.2] - 2023-09-11
### Fixed
- Fixed tie breaking in ndcg metric ([#2031](https://github.com/Lightning-AI/torchmetrics/pull/2031))
- Fixed bug in `BootStrapper` when very few samples were evaluated that could lead to crash ([#2052](https://github.com/Lightning-AI/torchmetrics/pull/2052))
- Fixed bug when creating multiple plots that lead to not all plots being shown ([#2060](https://github.com/Lightning-AI/torchmetrics/pull/2060))
- Fixed performance issues in `RecallAtFixedPrecision` for large batch sizes ([#2042](https://github.com/Lightning-AI/torchmetrics/pull/2042))
- Fixed bug related to `MetricCollection` used with custom metrics have `prefix`/`postfix` attributes ([#2070](https://github.com/Lightning-AI/torchmetrics/pull/2070))
## [1.1.1] - 2023-08-29
### Added
- Added `average` argument to `MeanAveragePrecision` ([#2018](https://github.com/Lightning-AI/torchmetrics/pull/2018))
### Fixed
- Fixed bug in `PearsonCorrCoef` when it is updated on single samples at a time ([#2019](https://github.com/Lightning-AI/torchmetrics/pull/2019))
- Fixed support for pixel-wise MSE ([#2017](https://github.com/Lightning-AI/torchmetrics/pull/2017))
- Fixed bug in `MetricCollection` when used with multiple metrics that return dicts with same keys ([#2027](https://github.com/Lightning-AI/torchmetrics/pull/2027))
- Fixed bug in detection intersection metrics when `class_metrics=True` resulting in wrong values ([#1924](https://github.com/Lightning-AI/torchmetrics/pull/1924))
- Fixed missing attributes `higher_is_better`, `is_differentiable` for some metrics ([#2028](https://github.com/Lightning-AI/torchmetrics/pull/2028))
## [1.1.0] - 2023-08-22
### Added
- Added source aggregated signal-to-distortion ratio (SA-SDR) metric ([#1882](https://github.com/Lightning-AI/torchmetrics/pull/1882))
- Added `VisualInformationFidelity` to image package ([#1830](https://github.com/Lightning-AI/torchmetrics/pull/1830))
- Added `EditDistance` to text package ([#1906](https://github.com/Lightning-AI/torchmetrics/pull/1906))
- Added `top_k` argument to `RetrievalMRR` in retrieval package ([#1961](https://github.com/Lightning-AI/torchmetrics/pull/1961))
- Added support for evaluating `"segm"` and `"bbox"` detection in `MeanAveragePrecision` at the same time ([#1928](https://github.com/Lightning-AI/torchmetrics/pull/1928))
- Added `PerceptualPathLength` to image package ([#1939](https://github.com/Lightning-AI/torchmetrics/pull/1939))
- Added support for multioutput evaluation in `MeanSquaredError` ([#1937](https://github.com/Lightning-AI/torchmetrics/pull/1937))
- Added argument `extended_summary` to `MeanAveragePrecision` such that precision, recall, iou can be easily returned ([#1983](https://github.com/Lightning-AI/torchmetrics/pull/1983))
- Added warning to `ClipScore` if long captions are detected and truncated ([#2001](https://github.com/Lightning-AI/torchmetrics/pull/2001))
- Added `CLIPImageQualityAssessment` to multimodal package ([#1931](https://github.com/Lightning-AI/torchmetrics/pull/1931))
- Added new property `metric_state` to all metrics for users to investigate currently stored tensors in memory ([#2006](https://github.com/Lightning-AI/torchmetrics/pull/2006))
## [1.0.3] - 2023-08-08
### Added
- Added warning to `MeanAveragePrecision` if too many detections are observed ([#1978](https://github.com/Lightning-AI/torchmetrics/pull/1978))
### Fixed
- Fix support for int input for when `multidim_average="samplewise"` in classification metrics ([#1977](https://github.com/Lightning-AI/torchmetrics/pull/1977))
- Fixed x/y labels when plotting confusion matrices ([#1976](https://github.com/Lightning-AI/torchmetrics/pull/1976))
- Fixed IOU compute in cuda ([#1982](https://github.com/Lightning-AI/torchmetrics/pull/1982))
## [1.0.2] - 2023-08-02
### Added
- Added warning to `PearsonCorrCoeff` if input has a very small variance for its given dtype ([#1926](https://github.com/Lightning-AI/torchmetrics/pull/1926))
### Changed
- Changed all non-task specific classification metrics to be true subtypes of `Metric` ([#1963](https://github.com/Lightning-AI/torchmetrics/pull/1963))
### Fixed
- Fixed bug in `CalibrationError` where calculations for double precision input was performed in float precision ([#1919](https://github.com/Lightning-AI/torchmetrics/pull/1919))
- Fixed bug related to the `prefix/postfix` arguments in `MetricCollection` and `ClasswiseWrapper` being duplicated ([#1918](https://github.com/Lightning-AI/torchmetrics/pull/1918))
- Fixed missing AUC score when plotting classification metrics that support the `score` argument ([#1948](https://github.com/Lightning-AI/torchmetrics/pull/1948))
## [1.0.1] - 2023-07-13
### Fixed
- Fixed corner case when using `MetricCollection` together with aggregation metrics ([#1896](https://github.com/Lightning-AI/torchmetrics/pull/1896))
- Fixed the use of `max_fpr` in `AUROC` metric when only one class is present ([#1895](https://github.com/Lightning-AI/torchmetrics/pull/1895))
- Fixed bug related to empty predictions for `IntersectionOverUnion` metric ([#1892](https://github.com/Lightning-AI/torchmetrics/pull/1892))
- Fixed bug related to `MeanMetric` and broadcasting of weights when Nans are present ([#1898](https://github.com/Lightning-AI/torchmetrics/pull/1898))
- Fixed bug related to expected input format of pycoco in `MeanAveragePrecision` ([#1913](https://github.com/Lightning-AI/torchmetrics/pull/1913))
## [1.0.0] - 2023-07-04
### Added
- Added `prefix` and `postfix` arguments to `ClasswiseWrapper` ([#1866](https://github.com/Lightning-AI/torchmetrics/pull/1866))
- Added speech-to-reverberation modulation energy ratio (SRMR) metric ([#1792](https://github.com/Lightning-AI/torchmetrics/pull/1792), [#1872](https://github.com/Lightning-AI/torchmetrics/pull/1872))
- Added new global arg `compute_with_cache` to control caching behaviour after `compute` method ([#1754](https://github.com/Lightning-AI/torchmetrics/pull/1754))
- Added `ComplexScaleInvariantSignalNoiseRatio` for audio package ([#1785](https://github.com/Lightning-AI/torchmetrics/pull/1785))
- Added `Running` wrapper for calculating running statistics ([#1752](https://github.com/Lightning-AI/torchmetrics/pull/1752))
- Added `RelativeAverageSpectralError` and `RootMeanSquaredErrorUsingSlidingWindow` to image package ([#816](https://github.com/PyTorchLightning/metrics/pull/816))
- Added support for `SpecificityAtSensitivity` Metric ([#1432](https://github.com/Lightning-AI/metrics/pull/1432))
- Added support for plotting of metrics through `.plot()` method, sketched in the example after this list (
[#1328](https://github.com/Lightning-AI/metrics/pull/1328),
[#1481](https://github.com/Lightning-AI/metrics/pull/1481),
[#1480](https://github.com/Lightning-AI/metrics/pull/1480),
[#1490](https://github.com/Lightning-AI/metrics/pull/1490),
[#1581](https://github.com/Lightning-AI/metrics/pull/1581),
[#1585](https://github.com/Lightning-AI/metrics/pull/1585),
[#1593](https://github.com/Lightning-AI/metrics/pull/1593),
[#1600](https://github.com/Lightning-AI/metrics/pull/1600),
[#1605](https://github.com/Lightning-AI/metrics/pull/1605),
[#1610](https://github.com/Lightning-AI/metrics/pull/1610),
[#1609](https://github.com/Lightning-AI/metrics/pull/1609),
[#1621](https://github.com/Lightning-AI/metrics/pull/1621),
[#1624](https://github.com/Lightning-AI/metrics/pull/1624),
[#1623](https://github.com/Lightning-AI/metrics/pull/1623),
[#1638](https://github.com/Lightning-AI/metrics/pull/1638),
[#1631](https://github.com/Lightning-AI/metrics/pull/1631),
[#1650](https://github.com/Lightning-AI/metrics/pull/1650),
[#1639](https://github.com/Lightning-AI/metrics/pull/1639),
[#1660](https://github.com/Lightning-AI/metrics/pull/1660),
[#1682](https://github.com/Lightning-AI/torchmetrics/pull/1682),
[#1786](https://github.com/Lightning-AI/torchmetrics/pull/1786),
)
- Added support for plotting of audio metrics through `.plot()` method ([#1434](https://github.com/Lightning-AI/metrics/pull/1434))
- Added `classes` to output from `MAP` metric ([#1419](https://github.com/Lightning-AI/metrics/pull/1419))
- Added Binary group fairness metrics to classification package ([#1404](https://github.com/Lightning-AI/metrics/pull/1404))
- Added `MinkowskiDistance` to regression package ([#1362](https://github.com/Lightning-AI/metrics/pull/1362))
- Added `pairwise_minkowski_distance` to pairwise package ([#1362](https://github.com/Lightning-AI/metrics/pull/1362))
- Added new detection metric `PanopticQuality` (
[#929](https://github.com/PyTorchLightning/metrics/pull/929),
[#1527](https://github.com/PyTorchLightning/metrics/pull/1527),
)
- Added `PSNRB` metric ([#1421](https://github.com/Lightning-AI/metrics/pull/1421))
- Added `ClassificationTask` Enum and use in metrics ([#1479](https://github.com/Lightning-AI/metrics/pull/1479))
- Added `ignore_index` option to `exact_match` metric ([#1540](https://github.com/Lightning-AI/metrics/pull/1540))
- Added parameter `top_k` to `RetrievalMAP` ([#1501](https://github.com/Lightning-AI/metrics/pull/1501))
- Added support for deterministic evaluation on GPU for metrics that use the `torch.cumsum` operator ([#1499](https://github.com/Lightning-AI/metrics/pull/1499))
- Added support for plotting of aggregation metrics through `.plot()` method ([#1485](https://github.com/Lightning-AI/metrics/pull/1485))
- Added support for python 3.11 ([#1612](https://github.com/Lightning-AI/metrics/pull/1612))
- Added support for auto clamping of input for metrics that use the `data_range` argument ([#1606](https://github.com/Lightning-AI/metrics/pull/1606))
- Added `ModifiedPanopticQuality` metric to detection package ([#1627](https://github.com/Lightning-AI/metrics/pull/1627))
- Added `PrecisionAtFixedRecall` metric to classification package ([#1683](https://github.com/Lightning-AI/torchmetrics/pull/1683))
- Added multiple metrics to detection package ([#1284](https://github.com/Lightning-AI/metrics/pull/1284))
* `IntersectionOverUnion`
* `GeneralizedIntersectionOverUnion`
* `CompleteIntersectionOverUnion`
* `DistanceIntersectionOverUnion`
- Added `MultitaskWrapper` to wrapper package ([#1762](https://github.com/Lightning-AI/torchmetrics/pull/1762))
- Added `RelativeSquaredError` metric to regression package ([#1765](https://github.com/Lightning-AI/torchmetrics/pull/1765))
- Added `MemorizationInformedFrechetInceptionDistance` metric to image package ([#1580](https://github.com/Lightning-AI/torchmetrics/pull/1580))
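A minimal sketch of the `.plot()` support referenced above (it assumes `matplotlib` is installed; `BinaryAccuracy` is just an example metric):

```python
import torch
from torchmetrics.classification import BinaryAccuracy

metric = BinaryAccuracy()
for _ in range(5):
    metric.update(torch.rand(16), torch.randint(2, (16,)))

# returns a matplotlib figure and axis with the current metric value plotted
fig, ax = metric.plot()
```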
### Changed
- Changed `permutation_invariant_training` to allow using a `'permutation-wise'` metric function ([#1794](https://github.com/Lightning-AI/metrics/pull/1794))
- Changed `update_count` and `update_called` from private to public methods ([#1370](https://github.com/Lightning-AI/metrics/pull/1370))
- Raise exception for invalid kwargs in Metric base class ([#1427](https://github.com/Lightning-AI/metrics/pull/1427))
- Extend `EnumStr` raising `ValueError` for invalid value ([#1479](https://github.com/Lightning-AI/metrics/pull/1479))
- Improve speed and memory consumption of binned `PrecisionRecallCurve` with large number of samples ([#1493](https://github.com/Lightning-AI/metrics/pull/1493))
- Changed `__iter__` method from raising `NotImplementedError` to `TypeError` by setting to `None` ([#1538](https://github.com/Lightning-AI/metrics/pull/1538))
- `FID` metric will now raise an error if too few samples are provided ([#1655](https://github.com/Lightning-AI/metrics/pull/1655))
- Allowed FID with `torch.float64` ([#1628](https://github.com/Lightning-AI/metrics/pull/1628))
- Changed `LPIPS` implementation to no longer rely on a third-party package ([#1575](https://github.com/Lightning-AI/metrics/pull/1575))
- Changed FID matrix square root calculation from `scipy` to `torch` ([#1708](https://github.com/Lightning-AI/torchmetrics/pull/1708))
- Changed calculation in `PearsonCorrCoef` to be more robust in certain cases ([#1729](https://github.com/Lightning-AI/torchmetrics/pull/1729))
- Changed `MeanAveragePrecision` to `pycocotools` backend ([#1832](https://github.com/Lightning-AI/torchmetrics/pull/1832))
### Deprecated
- Deprecated domain metrics import from package root (
[#1685](https://github.com/Lightning-AI/metrics/pull/1685),
[#1694](https://github.com/Lightning-AI/metrics/pull/1694),
[#1696](https://github.com/Lightning-AI/metrics/pull/1696),
[#1699](https://github.com/Lightning-AI/metrics/pull/1699),
[#1703](https://github.com/Lightning-AI/metrics/pull/1703),
)
### Removed
- Support for python 3.7 ([#1640](https://github.com/Lightning-AI/metrics/pull/1640))
### Fixed
- Fixed support in `MetricTracker` for `MultioutputWrapper` and nested structures ([#1608](https://github.com/Lightning-AI/metrics/pull/1608))
- Fixed restrictive check in `PearsonCorrCoef` ([#1649](https://github.com/Lightning-AI/metrics/pull/1649))
- Fixed integration with `jsonargparse` and `LightningCLI` ([#1651](https://github.com/Lightning-AI/metrics/pull/1651))
- Fixed corner case in calibration error for zero confidence input ([#1648](https://github.com/Lightning-AI/metrics/pull/1648))
- Fixed precision-recall curve based computations for float targets ([#1642](https://github.com/Lightning-AI/metrics/pull/1642))
- Fixed missing kwarg squeeze in `MultiOutputWrapper` ([#1675](https://github.com/Lightning-AI/torchmetrics/pull/1675))
- Fixed padding removal for 3d input in `MSSSIM` ([#1674](https://github.com/Lightning-AI/torchmetrics/pull/1674))
- Fixed `max_det_threshold` in MAP detection ([#1712](https://github.com/Lightning-AI/torchmetrics/pull/1712))
- Fixed states being saved in metrics that use `register_buffer` ([#1728](https://github.com/Lightning-AI/torchmetrics/pull/1728))
- Fixed states not being correctly synced and device transferred in `MeanAveragePrecision` for `iou_type="segm"` ([#1763](https://github.com/Lightning-AI/torchmetrics/pull/1763))
- Fixed use of `prefix` and `postfix` in nested `MetricCollection` ([#1773](https://github.com/Lightning-AI/torchmetrics/pull/1773))
- Fixed `ax` plotting logging in `MetricCollection` ([#1783](https://github.com/Lightning-AI/torchmetrics/pull/1783))
- Fixed lookup for punkt sources being downloaded in `RougeScore` ([#1789](https://github.com/Lightning-AI/torchmetrics/pull/1789))
- Fixed integration with lightning for `CompositionalMetric` ([#1761](https://github.com/Lightning-AI/torchmetrics/pull/1761))
- Fixed several bugs in `SpectralDistortionIndex` metric ([#1808](https://github.com/Lightning-AI/torchmetrics/pull/1808))
- Fixed bug for corner cases in `MatthewsCorrCoef` (
[#1812](https://github.com/Lightning-AI/torchmetrics/pull/1812),
[#1863](https://github.com/Lightning-AI/torchmetrics/pull/1863)
)
- Fixed support for half precision in `PearsonCorrCoef` ([#1819](https://github.com/Lightning-AI/torchmetrics/pull/1819))
- Fixed a number of bugs related to `average="macro"` in classification metrics ([#1821](https://github.com/Lightning-AI/torchmetrics/pull/1821))
- Fixed off-by-one issue when `ignore_index = num_classes + 1` in multiclass Jaccard index ([#1860](https://github.com/Lightning-AI/torchmetrics/pull/1860))
## [0.11.4] - 2023-03-10
### Fixed
- Fixed evaluation of `R2Score` with near constant target ([#1576](https://github.com/Lightning-AI/metrics/pull/1576))
- Fixed dtype conversion when metric is submodule ([#1583](https://github.com/Lightning-AI/metrics/pull/1583))
- Fixed bug related to `top_k>1` and `ignore_index!=None` in `StatScores` based metrics ([#1589](https://github.com/Lightning-AI/metrics/pull/1589))
- Fixed corner case for `PearsonCorrCoef` when running in ddp mode but only on single device ([#1587](https://github.com/Lightning-AI/metrics/pull/1587))
- Fixed overflow error for specific cases in `MAP` when big areas are calculated ([#1607](https://github.com/Lightning-AI/metrics/pull/1607))
## [0.11.3] - 2023-02-28
### Fixed
- Fixed classification metrics for `byte` input ([#1521](https://github.com/Lightning-AI/metrics/pull/1521))
- Fixed the use of `ignore_index` in `MulticlassJaccardIndex` ([#1386](https://github.com/Lightning-AI/metrics/pull/1386))
## [0.11.2] - 2023-02-21
### Fixed
- Fixed compatibility with XLA in the `_bincount` function ([#1471](https://github.com/Lightning-AI/metrics/pull/1471))
- Fixed type hints in methods belonging to `MetricTracker` wrapper ([#1472](https://github.com/Lightning-AI/metrics/pull/1472))
- Fixed `multilabel` in `ExactMatch` ([#1474](https://github.com/Lightning-AI/metrics/pull/1474))
## [0.11.1] - 2023-01-30
### Fixed
- Fixed type checking on the `maximize` parameter at the initialization of `MetricTracker` ([#1428](https://github.com/Lightning-AI/metrics/issues/1428))
- Fixed mixed precision autocast for `SSIM` metric ([#1454](https://github.com/Lightning-AI/metrics/pull/1454))
- Fixed checking for `nltk.punkt` in `RougeScore` if a machine is not online ([#1456](https://github.com/Lightning-AI/metrics/pull/1456))
- Fixed wrongly reset method in `MultioutputWrapper` ([#1460](https://github.com/Lightning-AI/metrics/issues/1460))
- Fixed dtype checking in `PrecisionRecallCurve` for `target` tensor ([#1457](https://github.com/Lightning-AI/metrics/pull/1457))
## [0.11.0] - 2022-11-30
### Added
- Added `MulticlassExactMatch` to classification metrics ([#1343](https://github.com/Lightning-AI/metrics/pull/1343))
- Added `TotalVariation` to image package ([#978](https://github.com/Lightning-AI/metrics/pull/978))
- Added `CLIPScore` to new multimodal package ([#1314](https://github.com/Lightning-AI/metrics/pull/1314))
- Added regression metrics:
* `KendallRankCorrCoef` ([#1271](https://github.com/Lightning-AI/metrics/pull/1271))
* `LogCoshError` ([#1316](https://github.com/Lightning-AI/metrics/pull/1316))
- Added new nominal metrics:
* `CramersV` ([#1298](https://github.com/Lightning-AI/metrics/pull/1298))
* `PearsonsContingencyCoefficient` ([#1334](https://github.com/Lightning-AI/metrics/pull/1334))
* `TschuprowsT` ([#1334](https://github.com/Lightning-AI/metrics/pull/1334))
* `TheilsU` ([#1337](https://github.com/Lightning-AI/metrics/pull/1337))
- Added option to pass `distributed_available_fn` to metrics to allow checks for custom communication backend for making `dist_sync_fn` actually useful ([#1301](https://github.com/Lightning-AI/metrics/pull/1301))
- Added `normalize` argument to `Inception`, `FID`, `KID` metrics ([#1246](https://github.com/Lightning-AI/metrics/pull/1246))
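A minimal sketch of the new `normalize` argument on `FrechetInceptionDistance` (assumes `torch-fidelity` is installed, since the metric loads pretrained Inception weights):

```python
import torch
from torchmetrics.image.fid import FrechetInceptionDistance

# with normalize=True the metric expects float images in [0, 1] instead of uint8 in [0, 255]
fid = FrechetInceptionDistance(feature=64, normalize=True)
fid.update(torch.rand(16, 3, 299, 299), real=True)
fid.update(torch.rand(16, 3, 299, 299), real=False)
score = fid.compute()
```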
### Changed
- Changed minimum Pytorch version to be 1.8 ([#1263](https://github.com/Lightning-AI/metrics/pull/1263))
- Changed interface for all functional and modular classification metrics after refactor ([#1252](https://github.com/Lightning-AI/metrics/pull/1252))
### Removed
- Removed deprecated `BinnedAveragePrecision`, `BinnedPrecisionRecallCurve`, `RecallAtFixedPrecision` ([#1251](https://github.com/Lightning-AI/metrics/pull/1251))
- Removed deprecated `LabelRankingAveragePrecision`, `LabelRankingLoss` and `CoverageError` ([#1251](https://github.com/Lightning-AI/metrics/pull/1251))
- Removed deprecated `KLDivergence` and `AUC` ([#1251](https://github.com/Lightning-AI/metrics/pull/1251))
### Fixed
- Fixed precision bug in `pairwise_euclidean_distance` ([#1352](https://github.com/Lightning-AI/metrics/pull/1352))
## [0.10.3] - 2022-11-16
### Fixed
- Fixed bug in `MetricTracker.best_metric` when `return_step=False` ([#1306](https://github.com/Lightning-AI/metrics/pull/1306))
- Fixed bug to prevent users from going into an infinite loop if trying to iterate over a single metric ([#1320](https://github.com/Lightning-AI/metrics/pull/1320))
## [0.10.2] - 2022-10-31
### Changed
- Changed in-place operation to out-of-place operation in `pairwise_cosine_similarity` ([#1288](https://github.com/Lightning-AI/metrics/pull/1288))
### Fixed
- Fixed high memory usage for certain classification metrics when `average='micro'` ([#1286](https://github.com/Lightning-AI/metrics/pull/1286))
- Fixed precision problems when `structural_similarity_index_measure` was used with autocast ([#1291](https://github.com/Lightning-AI/metrics/pull/1291))
- Fixed slow performance for confusion matrix based metrics ([#1302](https://github.com/Lightning-AI/metrics/pull/1302))
- Fixed restrictive dtype checking in `spearman_corrcoef` when used with autocast ([#1303](https://github.com/Lightning-AI/metrics/pull/1303))
## [0.10.1] - 2022-10-21
### Fixed
- Fixed broken clone method for classification metrics ([#1250](https://github.com/Lightning-AI/metrics/pull/1250))
- Fixed unintentional downloading of `nltk.punkt` when `lsum` not in `rouge_keys` ([#1258](https://github.com/Lightning-AI/metrics/pull/1258))
- Fixed type casting in `MAP` metric between `bool` and `float32` ([#1150](https://github.com/Lightning-AI/metrics/pull/1150))
## [0.10.0] - 2022-10-04
### Added
- Added a new NLP metric `InfoLM` ([#915](https://github.com/Lightning-AI/metrics/pull/915))
- Added `Perplexity` metric ([#922](https://github.com/Lightning-AI/metrics/pull/922))
- Added `ConcordanceCorrCoef` metric to regression package ([#1201](https://github.com/Lightning-AI/metrics/pull/1201))
- Added argument `normalize` to `LPIPS` metric ([#1216](https://github.com/Lightning-AI/metrics/pull/1216))
- Added support for multiprocessing of batches in `PESQ` metric ([#1227](https://github.com/Lightning-AI/metrics/pull/1227))
- Added support for multioutput in `PearsonCorrCoef` and `SpearmanCorrCoef` ([#1200](https://github.com/Lightning-AI/metrics/pull/1200))
### Changed
- Classification refactor (
[#1054](https://github.com/Lightning-AI/metrics/pull/1054),
[#1143](https://github.com/Lightning-AI/metrics/pull/1143),
[#1145](https://github.com/Lightning-AI/metrics/pull/1145),
[#1151](https://github.com/Lightning-AI/metrics/pull/1151),
[#1159](https://github.com/Lightning-AI/metrics/pull/1159),
[#1163](https://github.com/Lightning-AI/metrics/pull/1163),
[#1167](https://github.com/Lightning-AI/metrics/pull/1167),
[#1175](https://github.com/Lightning-AI/metrics/pull/1175),
[#1189](https://github.com/Lightning-AI/metrics/pull/1189),
[#1197](https://github.com/Lightning-AI/metrics/pull/1197),
[#1215](https://github.com/Lightning-AI/metrics/pull/1215),
[#1195](https://github.com/Lightning-AI/metrics/pull/1195)
)
- Changed update in `FID` metric to be done in online fashion to save memory ([#1199](https://github.com/Lightning-AI/metrics/pull/1199))
- Improved performance of retrieval metrics ([#1242](https://github.com/Lightning-AI/metrics/pull/1242))
- Changed `SSIM` and `MSSSIM` update to be online to reduce memory usage ([#1231](https://github.com/Lightning-AI/metrics/pull/1231))
### Deprecated
- Deprecated `BinnedAveragePrecision`, `BinnedPrecisionRecallCurve`, `BinnedRecallAtFixedPrecision` ([#1163](https://github.com/Lightning-AI/metrics/pull/1163))
* `BinnedAveragePrecision` -> use `AveragePrecision` with `thresholds` arg
* `BinnedPrecisionRecallCurve` -> use `PrecisionRecallCurve` with `thresholds` arg
* `BinnedRecallAtFixedPrecision` -> use `RecallAtFixedPrecision` with `thresholds` arg
- Renamed and refactored `LabelRankingAveragePrecision`, `LabelRankingLoss` and `CoverageError` ([#1167](https://github.com/Lightning-AI/metrics/pull/1167))
* `LabelRankingAveragePrecision` -> `MultilabelRankingAveragePrecision`
* `LabelRankingLoss` -> `MultilabelRankingLoss`
* `CoverageError` -> `MultilabelCoverageError`
- Deprecated `KLDivergence` and `AUC` from classification package ([#1189](https://github.com/Lightning-AI/metrics/pull/1189))
* `KLDivergence` moved to `regression` package
* Instead of `AUC` use `torchmetrics.utils.compute.auc`
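A minimal sketch of how the deprecation mappings above translate into user code, using the task-specific class names that exist after the classification refactor in this release:

```python
import torch
from torchmetrics.classification import MulticlassAveragePrecision, MultilabelRankingLoss

# instead of BinnedAveragePrecision, pass an explicit `thresholds` argument
ap = MulticlassAveragePrecision(num_classes=3, thresholds=100)
ap.update(torch.randn(20, 3).softmax(dim=-1), torch.randint(3, (20,)))

# LabelRankingLoss lives on as MultilabelRankingLoss
rl = MultilabelRankingLoss(num_labels=4)
rl.update(torch.rand(20, 4), torch.randint(2, (20, 4)))
```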
### Fixed
- Fixed a bug in `ssim` when `return_full_image=True` where the score was still reduced ([#1204](https://github.com/Lightning-AI/metrics/pull/1204))
- Fixed MPS support for:
* MAE metric ([#1210](https://github.com/Lightning-AI/metrics/pull/1210))
* Jaccard index ([#1205](https://github.com/Lightning-AI/metrics/pull/1205))
- Fixed bug in `ClasswiseWrapper` such that `compute` gave wrong result ([#1225](https://github.com/Lightning-AI/metrics/pull/1225))
- Fixed synchronization of empty list states ([#1219](https://github.com/Lightning-AI/metrics/pull/1219))
## [0.9.3] - 2022-08-22
### Added
- Added global option `sync_on_compute` to disable automatic synchronization when `compute` is called ([#1107](https://github.com/Lightning-AI/metrics/pull/1107))
### Fixed
- Fixed missing reset in `ClasswiseWrapper` ([#1129](https://github.com/Lightning-AI/metrics/pull/1129))
- Fixed `JaccardIndex` multi-label compute ([#1125](https://github.com/Lightning-AI/metrics/pull/1125))
- Fixed `SSIM` to propagate device when `gaussian_kernel` is False and added a test ([#1149](https://github.com/Lightning-AI/metrics/pull/1149))
## [0.9.2] - 2022-06-29
### Fixed
- Fixed mAP calculation for areas with 0 predictions ([#1080](https://github.com/Lightning-AI/metrics/pull/1080))
- Fixed bug where average precision state and AUROC state were not merged when using `MetricCollection` ([#1086](https://github.com/Lightning-AI/metrics/pull/1086))
- Skip box conversion if no boxes are present in `MeanAveragePrecision` ([#1097](https://github.com/Lightning-AI/metrics/pull/1097))
- Fixed inconsistency in docs and code when setting `average="none"` in `AveragePrecision` metric ([#1116](https://github.com/Lightning-AI/metrics/pull/1116))
## [0.9.1] - 2022-06-08
### Added
- Added specific `RuntimeError` when metric object is on the wrong device ([#1056](https://github.com/Lightning-AI/metrics/pull/1056))
- Added an option to specify own n-gram weights for `BLEUScore` and `SacreBLEUScore` instead of using uniform weights only. ([#1075](https://github.com/Lightning-AI/metrics/pull/1075))
### Fixed
- Fixed aggregation metrics when input only contains zero ([#1070](https://github.com/Lightning-AI/metrics/pull/1070))
- Fixed `TypeError` when providing superclass arguments as `kwargs` ([#1069](https://github.com/Lightning-AI/metrics/pull/1069))
- Fixed bug related to state reference in metric collection when using compute groups ([#1076](https://github.com/Lightning-AI/metrics/pull/1076))
## [0.9.0] - 2022-05-30
### Added
- Added `RetrievalPrecisionRecallCurve` and `RetrievalRecallAtFixedPrecision` to retrieval package ([#951](https://github.com/Lightning-AI/metrics/pull/951))
- Added class property `full_state_update` that determines whether `forward` should call `update` once or twice, sketched in the example after this list (
[#984](https://github.com/Lightning-AI/metrics/pull/984),
[#1033](https://github.com/Lightning-AI/metrics/pull/1033))
- Added support for nested metric collections ([#1003](https://github.com/Lightning-AI/metrics/pull/1003))
- Added `Dice` to classification package ([#1021](https://github.com/Lightning-AI/metrics/pull/1021))
- Added support for segmentation type `segm` as IoU for mean average precision ([#822](https://github.com/Lightning-AI/metrics/pull/822))
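A minimal sketch of the `full_state_update` class property referenced above, set on a custom metric (the metric itself is a made-up example):

```python
import torch
from torchmetrics import Metric

class SumOfSquares(Metric):
    # a single `update` call is enough for `forward` to produce correct batch values
    full_state_update = False

    def __init__(self):
        super().__init__()
        self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, x: torch.Tensor) -> None:
        self.total += (x ** 2).sum()

    def compute(self) -> torch.Tensor:
        return self.total

m = SumOfSquares()
m.update(torch.tensor([1.0, 2.0]))
print(m.compute())  # tensor(5.)
```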
### Changed
- Renamed `reduction` argument to `average` in Jaccard score and added additional options ([#874](https://github.com/Lightning-AI/metrics/pull/874))
### Removed
- Removed deprecated `compute_on_step` argument (
[#962](https://github.com/Lightning-AI/metrics/pull/962),
[#967](https://github.com/Lightning-AI/metrics/pull/967),
[#979](https://github.com/Lightning-AI/metrics/pull/979),
[#990](https://github.com/Lightning-AI/metrics/pull/990),
[#991](https://github.com/Lightning-AI/metrics/pull/991),
[#993](https://github.com/Lightning-AI/metrics/pull/993),
[#1005](https://github.com/Lightning-AI/metrics/pull/1005),
[#1004](https://github.com/Lightning-AI/metrics/pull/1004),
[#1007](https://github.com/Lightning-AI/metrics/pull/1007)
)
### Fixed
- Fixed non-empty state dict for a few metrics ([#1012](https://github.com/Lightning-AI/metrics/pull/1012))
- Fixed bug when comparing states while finding compute groups ([#1022](https://github.com/Lightning-AI/metrics/pull/1022))
- Fixed `torch.double` support in stat score metrics ([#1023](https://github.com/Lightning-AI/metrics/pull/1023))
- Fixed `FID` calculation for non-equal size real and fake input ([#1028](https://github.com/Lightning-AI/metrics/pull/1028))
- Fixed case where `KLDivergence` could output `Nan` ([#1030](https://github.com/Lightning-AI/metrics/pull/1030))
- Fixed deterministic behavior for PyTorch<1.8 ([#1035](https://github.com/Lightning-AI/metrics/pull/1035))
- Fixed default value for `mdmc_average` in `Accuracy` ([#1036](https://github.com/Lightning-AI/metrics/pull/1036))
- Fixed missing copy of property when using compute groups in `MetricCollection` ([#1052](https://github.com/Lightning-AI/metrics/pull/1052))
## [0.8.2] - 2022-05-06
### Fixed
- Fixed multi device aggregation in `PearsonCorrCoef` ([#998](https://github.com/Lightning-AI/metrics/pull/998))
- Fixed MAP metric when using custom list of thresholds ([#995](https://github.com/Lightning-AI/metrics/pull/995))
- Fixed compatibility between compute groups in `MetricCollection` and prefix/postfix arg ([#1007](https://github.com/Lightning-AI/metrics/pull/1008))
- Fixed compatibility with future Pytorch 1.12 in `safe_matmul` ([#1011](https://github.com/Lightning-AI/metrics/pull/1011), [#1014](https://github.com/Lightning-AI/metrics/pull/1014))
## [0.8.1] - 2022-04-27
### Changed
- Reimplemented the `signal_distortion_ratio` metric, which removed the absolute requirement of `fast-bss-eval` ([#964](https://github.com/Lightning-AI/metrics/pull/964))
### Fixed
- Fixed "Sort currently does not support bool dtype on CUDA" error in MAP for empty preds ([#983](https://github.com/Lightning-AI/metrics/pull/983))
- Fixed `BinnedPrecisionRecallCurve` when `thresholds` argument is not provided ([#968](https://github.com/Lightning-AI/metrics/pull/968))
- Fixed `CalibrationError` to work on logit input ([#985](https://github.com/Lightning-AI/metrics/pull/985))
## [0.8.0] - 2022-04-14
### Added
- Added `WeightedMeanAbsolutePercentageError` to regression package ([#948](https://github.com/Lightning-AI/metrics/pull/948))
- Added new classification metrics:
* `CoverageError` ([#787](https://github.com/Lightning-AI/metrics/pull/787))
* `LabelRankingAveragePrecision` and `LabelRankingLoss` ([#787](https://github.com/Lightning-AI/metrics/pull/787))
- Added new image metrics:
* `SpectralAngleMapper` ([#885](https://github.com/Lightning-AI/metrics/pull/885))
* `ErrorRelativeGlobalDimensionlessSynthesis` ([#894](https://github.com/Lightning-AI/metrics/pull/894))
* `UniversalImageQualityIndex` ([#824](https://github.com/Lightning-AI/metrics/pull/824))
* `SpectralDistortionIndex` ([#873](https://github.com/Lightning-AI/metrics/pull/873))
- Added support for `MetricCollection` in `MetricTracker` ([#718](https://github.com/Lightning-AI/metrics/pull/718))
- Added support for 3D image and uniform kernel in `StructuralSimilarityIndexMeasure` ([#818](https://github.com/Lightning-AI/metrics/pull/818))
- Added smart update of `MetricCollection` ([#709](https://github.com/Lightning-AI/metrics/pull/709))
- Added `ClasswiseWrapper` for better logging of classification metrics with multiple output values ([#832](https://github.com/Lightning-AI/metrics/pull/832))
- Added `**kwargs` argument for passing additional arguments to base class ([#833](https://github.com/Lightning-AI/metrics/pull/833))
- Added negative `ignore_index` for the Accuracy metric ([#362](https://github.com/Lightning-AI/metrics/pull/362))
- Added `adaptive_k` for the `RetrievalPrecision` metric ([#910](https://github.com/Lightning-AI/metrics/pull/910))
- Added `reset_real_features` argument to image quality assessment metrics ([#722](https://github.com/Lightning-AI/metrics/pull/722))
- Added new keyword argument `compute_on_cpu` to all metrics ([#867](https://github.com/Lightning-AI/metrics/pull/867))
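A minimal sketch of the new `compute_on_cpu` keyword argument, shown with a metric that caches all predictions and targets (`SpearmanCorrCoef` is just an illustrative choice):

```python
import torch
from torchmetrics.regression import SpearmanCorrCoef

# `compute_on_cpu=True` moves cached states to CPU after each update,
# trading an extra device transfer for lower GPU memory usage
metric = SpearmanCorrCoef(compute_on_cpu=True)
metric.update(torch.randn(100), torch.randn(100))
print(metric.compute())
```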
### Changed
- Made `num_classes` in `jaccard_index` a required argument ([#853](https://github.com/Lightning-AI/metrics/pull/853), [#914](https://github.com/Lightning-AI/metrics/pull/914))
- Added normalizer, tokenizer to ROUGE metric ([#838](https://github.com/Lightning-AI/metrics/pull/838))
- Improved shape checking of `permutation_invariant_training` ([#864](https://github.com/Lightning-AI/metrics/pull/864))
- Allowed reduction `None` ([#891](https://github.com/Lightning-AI/metrics/pull/891))
- `MetricTracker.best_metric` will now give a warning when computed on a metric that does not define a best value ([#913](https://github.com/Lightning-AI/metrics/pull/913))
### Deprecated
- Deprecated argument `compute_on_step` ([#792](https://github.com/Lightning-AI/metrics/pull/792))
- Deprecated passing in `dist_sync_on_step`, `process_group`, `dist_sync_fn` as direct arguments ([#833](https://github.com/Lightning-AI/metrics/pull/833))
### Removed
- Removed support for versions of [Pytorch-Lightning](https://github.com/Lightning-AI/lightning) lower than v1.5 ([#788](https://github.com/Lightning-AI/metrics/pull/788))
- Removed deprecated functions, and warnings in Text ([#773](https://github.com/Lightning-AI/metrics/pull/773))
* `WER` and `functional.wer`
- Removed deprecated functions and warnings in Image ([#796](https://github.com/Lightning-AI/metrics/pull/796))
* `SSIM` and `functional.ssim`
* `PSNR` and `functional.psnr`
- Removed deprecated functions, and warnings in classification and regression ([#806](https://github.com/Lightning-AI/metrics/pull/806))
* `FBeta` and `functional.fbeta`
* `F1` and `functional.f1`
* `Hinge` and `functional.hinge`
* `IoU` and `functional.iou`
* `MatthewsCorrcoef`
* `PearsonCorrcoef`
* `SpearmanCorrcoef`
- Removed deprecated functions, and warnings in detection and pairwise ([#804](https://github.com/Lightning-AI/metrics/pull/804))
* `MAP` and `functional.pairwise.manhatten`
- Removed deprecated functions, and warnings in Audio ([#805](https://github.com/Lightning-AI/metrics/pull/805))
* `PESQ` and `functional.audio.pesq`
* `PIT` and `functional.audio.pit`
* `SDR` and `functional.audio.sdr` and `functional.audio.si_sdr`
* `SNR` and `functional.audio.snr` and `functional.audio.si_snr`
* `STOI` and `functional.audio.stoi`
- Removed unused `get_num_classes` from `torchmetrics.utilities.data` ([#914](https://github.com/Lightning-AI/metrics/pull/914))
### Fixed
- Fixed device mismatch for `MAP` metric in specific cases ([#950](https://github.com/Lightning-AI/metrics/pull/950))
- Improved testing speed ([#820](https://github.com/Lightning-AI/metrics/pull/820))
- Fixed compatibility of `ClasswiseWrapper` with the `prefix` argument of `MetricCollection` ([#843](https://github.com/Lightning-AI/metrics/pull/843))
- Fixed `BestScore` on GPU ([#912](https://github.com/Lightning-AI/metrics/pull/912))
- Fixed Lsum computation for `ROUGEScore` ([#944](https://github.com/Lightning-AI/metrics/pull/944))
## [0.7.3] - 2022-03-23
### Fixed
- Fixed unsafe log operation in `TweedieDevianceScore` for power=1 ([#847](https://github.com/Lightning-AI/metrics/pull/847))
- Fixed bug in MAP metric related to either no ground truth or no predictions ([#884](https://github.com/Lightning-AI/metrics/pull/884))
- Fixed `ConfusionMatrix`, `AUROC` and `AveragePrecision` on GPU when running in deterministic mode ([#900](https://github.com/Lightning-AI/metrics/pull/900))
- Fixed NaN or Inf results returned by `signal_distortion_ratio` ([#899](https://github.com/Lightning-AI/metrics/pull/899))
- Fixed memory leak when using `update` method with tensor where `requires_grad=True` ([#902](https://github.com/Lightning-AI/metrics/pull/902))
## [0.7.2] - 2022-02-10
### Fixed
- Minor patches in JOSS paper.
## [0.7.1] - 2022-02-03
### Changed
- Used `torch.bucketize` in calibration error when `torch>1.8` for faster computations ([#769](https://github.com/Lightning-AI/metrics/pull/769))
- Improved mAP performance ([#742](https://github.com/Lightning-AI/metrics/pull/742))
### Fixed
- Fixed check for available modules ([#772](https://github.com/Lightning-AI/metrics/pull/772))
- Fixed Matthews correlation coefficient when the denominator is 0 ([#781](https://github.com/Lightning-AI/metrics/pull/781))
## [0.7.0] - 2022-01-17
### Added
- Added NLP metrics:
- `MatchErrorRate` ([#619](https://github.com/Lightning-AI/metrics/pull/619))
- `WordInfoLost` and `WordInfoPreserved` ([#630](https://github.com/Lightning-AI/metrics/pull/630))
- `SQuAD` ([#623](https://github.com/Lightning-AI/metrics/pull/623))
- `CHRFScore` ([#641](https://github.com/Lightning-AI/metrics/pull/641))
- `TranslationEditRate` ([#646](https://github.com/Lightning-AI/metrics/pull/646))
- `ExtendedEditDistance` ([#668](https://github.com/Lightning-AI/metrics/pull/668))
- Added `MultiScaleSSIM` into image metrics ([#679](https://github.com/Lightning-AI/metrics/pull/679))
- Added Signal to Distortion Ratio (`SDR`) to audio package ([#565](https://github.com/Lightning-AI/metrics/pull/565))
- Added `MinMaxMetric` to wrappers ([#556](https://github.com/Lightning-AI/metrics/pull/556))
- Added `ignore_index` to retrieval metrics ([#676](https://github.com/Lightning-AI/metrics/pull/676))
- Added support for multi references in `ROUGEScore` ([#680](https://github.com/Lightning-AI/metrics/pull/680))
- Added a default VSCode devcontainer configuration ([#621](https://github.com/Lightning-AI/metrics/pull/621))
### Changed
- Scalar metrics will now consistently have additional dimensions squeezed ([#622](https://github.com/Lightning-AI/metrics/pull/622))
- Metrics having third party dependencies removed from global import ([#463](https://github.com/Lightning-AI/metrics/pull/463))
- Untokenized input for `BLEUScore` now stays consistent with all the other text metrics ([#640](https://github.com/Lightning-AI/metrics/pull/640))
- Arguments reordered for `TER`, `BLEUScore`, `SacreBLEUScore`, `CHRFScore` now expect input order as predictions first and target second ([#696](https://github.com/Lightning-AI/metrics/pull/696))
- Changed dtype of metric state from `torch.float` to `torch.long` in `ConfusionMatrix` to accommodate larger values ([#715](https://github.com/Lightning-AI/metrics/pull/715))
- Unify `preds`, `target` input argument's naming across all text metrics ([#723](https://github.com/Lightning-AI/metrics/pull/723), [#727](https://github.com/Lightning-AI/metrics/pull/727))
* `bert`, `bleu`, `chrf`, `sacre_bleu`, `wip`, `wil`, `cer`, `ter`, `wer`, `mer`, `rouge`, `squad`
### Deprecated
- Renamed IoU -> Jaccard Index ([#662](https://github.com/Lightning-AI/metrics/pull/662))
- Renamed text WER metric ([#714](https://github.com/Lightning-AI/metrics/pull/714))
* `functional.wer` -> `functional.word_error_rate`
* `WER` -> `WordErrorRate`
- Renamed correlation coefficient classes: ([#710](https://github.com/Lightning-AI/metrics/pull/710))
* `MatthewsCorrcoef` -> `MatthewsCorrCoef`
* `PearsonCorrcoef` -> `PearsonCorrCoef`
* `SpearmanCorrcoef` -> `SpearmanCorrCoef`
- Renamed audio STOI metric: ([#753](https://github.com/Lightning-AI/metrics/pull/753), [#758](https://github.com/Lightning-AI/metrics/pull/758))
* `audio.STOI` to `audio.ShortTimeObjectiveIntelligibility`
* `functional.audio.stoi` to `functional.audio.short_time_objective_intelligibility`
- Renamed audio PESQ metrics: ([#751](https://github.com/Lightning-AI/metrics/pull/751))
* `functional.audio.pesq` -> `functional.audio.perceptual_evaluation_speech_quality`
* `audio.PESQ` -> `audio.PerceptualEvaluationSpeechQuality`
- Renamed audio SDR metrics: ([#711](https://github.com/Lightning-AI/metrics/pull/711))
* `functional.sdr` -> `functional.signal_distortion_ratio`
* `functional.si_sdr` -> `functional.scale_invariant_signal_distortion_ratio`
* `SDR` -> `SignalDistortionRatio`
* `SI_SDR` -> `ScaleInvariantSignalDistortionRatio`
- Renamed audio SNR metrics: ([#712](https://github.com/Lightning-AI/metrics/pull/712))
* `functional.snr` -> `functional.signal_noise_ratio`
* `functional.si_snr` -> `functional.scale_invariant_signal_noise_ratio`
* `SNR` -> `SignalNoiseRatio`
* `SI_SNR` -> `ScaleInvariantSignalNoiseRatio`
- Renamed F-score metrics: ([#731](https://github.com/Lightning-AI/metrics/pull/731), [#740](https://github.com/Lightning-AI/metrics/pull/740))
* `functional.f1` -> `functional.f1_score`
* `F1` -> `F1Score`
* `functional.fbeta` -> `functional.fbeta_score`
* `FBeta` -> `FBetaScore`
- Renamed Hinge metric: ([#734](https://github.com/Lightning-AI/metrics/pull/734))
* `functional.hinge` -> `functional.hinge_loss`
* `Hinge` -> `HingeLoss`
- Renamed image PSNR metrics ([#732](https://github.com/Lightning-AI/metrics/pull/732))
* `functional.psnr` -> `functional.peak_signal_noise_ratio`
* `PSNR` -> `PeakSignalNoiseRatio`
- Renamed audio PIT metric: ([#737](https://github.com/Lightning-AI/metrics/pull/737))
* `functional.pit` -> `functional.permutation_invariant_training`
* `PIT` -> `PermutationInvariantTraining`
- Renamed image SSIM metric: ([#747](https://github.com/Lightning-AI/metrics/pull/747))
* `functional.ssim` -> `functional.structural_similarity_index_measure`
* `SSIM` -> `StructuralSimilarityIndexMeasure`
- Renamed detection `MAP` to `MeanAveragePrecision` metric ([#754](https://github.com/Lightning-AI/metrics/pull/754))
- Renamed Fidelity & LPIPS image metric: ([#752](https://github.com/Lightning-AI/metrics/pull/752))
* `image.FID` -> `image.FrechetInceptionDistance`
* `image.KID` -> `image.KernelInceptionDistance`
* `image.LPIPS` -> `image.LearnedPerceptualImagePatchSimilarity`
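A short sketch of how the renamed classes above are used; the new names are shown, while the old ones kept working with a deprecation warning until their removal:

```python
import torch
# previously: from torchmetrics import WER, PSNR
from torchmetrics.text import WordErrorRate
from torchmetrics.image import PeakSignalNoiseRatio

wer = WordErrorRate()
wer.update(["hello world"], ["hello beautiful world"])

psnr = PeakSignalNoiseRatio()
psnr.update(torch.rand(4, 3, 32, 32), torch.rand(4, 3, 32, 32))
```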
### Removed
- Removed `embedding_similarity` metric ([#638](https://github.com/Lightning-AI/metrics/pull/638))
- Removed argument `concatenate_texts` from `wer` metric ([#638](https://github.com/Lightning-AI/metrics/pull/638))
- Removed arguments `newline_sep` and `decimal_places` from `rouge` metric ([#638](https://github.com/Lightning-AI/metrics/pull/638))
### Fixed
- Fixed MetricCollection kwargs filtering when no `kwargs` are present in update signature ([#707](https://github.com/Lightning-AI/metrics/pull/707))
## [0.6.2] - 2021-12-15
### Fixed
- Fixed "`torch.sort` currently does not support bool `dtype` on CUDA" error ([#665](https://github.com/Lightning-AI/metrics/pull/665))
- Fixed mAP to properly check if ground truths are empty ([#684](https://github.com/Lightning-AI/metrics/pull/684))
- Fixed initialization of tensors to be on correct device for `MAP` metric ([#673](https://github.com/Lightning-AI/metrics/pull/673))
## [0.6.1] - 2021-12-06
### Changed
- Migrated MAP metrics from pycocotools to PyTorch ([#632](https://github.com/Lightning-AI/metrics/pull/632))
- Use `torch.topk` instead of `torch.argsort` in retrieval precision for speedup ([#627](https://github.com/Lightning-AI/metrics/pull/627))
### Fixed
- Fixed empty predictions in MAP metric ([#594](https://github.com/Lightning-AI/metrics/pull/594), [#610](https://github.com/Lightning-AI/metrics/pull/610), [#624](https://github.com/Lightning-AI/metrics/pull/624))
- Fixed edge case of AUROC with `average=weighted` on GPU ([#606](https://github.com/Lightning-AI/metrics/pull/606))
- Fixed `forward` in compositional metrics ([#645](https://github.com/Lightning-AI/metrics/pull/645))
## [0.6.0] - 2021-10-28
### Added
- Added audio metrics:
- Perceptual Evaluation of Speech Quality (PESQ) ([#353](https://github.com/Lightning-AI/metrics/pull/353))
- Short-Time Objective Intelligibility (STOI) ([#353](https://github.com/Lightning-AI/metrics/pull/353))
- Added Information retrieval metrics:
- `RetrievalRPrecision` ([#577](https://github.com/Lightning-AI/metrics/pull/577))
- `RetrievalHitRate` ([#576](https://github.com/Lightning-AI/metrics/pull/576))
- Added NLP metrics:
- `SacreBLEUScore` ([#546](https://github.com/Lightning-AI/metrics/pull/546))
- `CharErrorRate` ([#575](https://github.com/Lightning-AI/metrics/pull/575))
- Added other metrics:
- Tweedie Deviance Score ([#499](https://github.com/Lightning-AI/metrics/pull/499))
- Learned Perceptual Image Patch Similarity (LPIPS) ([#431](https://github.com/Lightning-AI/metrics/pull/431))
- Added `MAP` (mean average precision) metric to new detection package ([#467](https://github.com/Lightning-AI/metrics/pull/467))
- Added support for float targets in `nDCG` metric ([#437](https://github.com/Lightning-AI/metrics/pull/437))
- Added `average` argument to `AveragePrecision` metric for reducing multi-label and multi-class problems ([#477](https://github.com/Lightning-AI/metrics/pull/477))
- Added `MultioutputWrapper` ([#510](https://github.com/Lightning-AI/metrics/pull/510))
- Added metric sweeping:
- `higher_is_better` as constant attribute ([#544](https://github.com/Lightning-AI/metrics/pull/544))
- `higher_is_better` to rest of codebase ([#584](https://github.com/Lightning-AI/metrics/pull/584))
- Added simple aggregation metrics: `SumMetric`, `MeanMetric`, `CatMetric`, `MinMetric`, `MaxMetric` ([#506](https://github.com/Lightning-AI/metrics/pull/506))
- Added pairwise submodule with metrics ([#553](https://github.com/Lightning-AI/metrics/pull/553))
- `pairwise_cosine_similarity`
- `pairwise_euclidean_distance`
- `pairwise_linear_similarity`
- `pairwise_manhatten_distance`
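A minimal sketch of the new pairwise functions:

```python
import torch
from torchmetrics.functional import pairwise_cosine_similarity, pairwise_euclidean_distance

x = torch.randn(4, 8)   # 4 samples with 8 features
y = torch.randn(6, 8)   # 6 samples with 8 features

sim = pairwise_cosine_similarity(x, y)     # shape [4, 6]
dist = pairwise_euclidean_distance(x, y)   # shape [4, 6]
```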
### Changed
- `AveragePrecision` will now as default output the `macro` average for multilabel and multiclass problems ([#477](https://github.com/Lightning-AI/metrics/pull/477))
- `half`, `double`, `float` will no longer change the dtype of the metric states. Use `metric.set_dtype` instead ([#493](https://github.com/Lightning-AI/metrics/pull/493))
- Renamed `AverageMeter` to `MeanMetric` ([#506](https://github.com/Lightning-AI/metrics/pull/506))
- Changed `is_differentiable` from property to a constant attribute ([#551](https://github.com/Lightning-AI/metrics/pull/551))
- `ROC` and `AUROC` will no longer throw an error when either the positive or negative class is missing. Instead, a score of 0 is returned and a warning is raised
### Deprecated
- Deprecated `functional.self_supervised.embedding_similarity` in favour of new pairwise submodule
### Removed
- Removed `dtype` property ([#493](https://github.com/Lightning-AI/metrics/pull/493))
### Fixed
- Fixed bug in `F1` with `average='macro'` and `ignore_index!=None` ([#495](https://github.com/Lightning-AI/metrics/pull/495))
- Fixed bug in `pit` by using the returned first result to initialize device and type ([#533](https://github.com/Lightning-AI/metrics/pull/533))
- Fixed `SSIM` metric using too much memory ([#539](https://github.com/Lightning-AI/metrics/pull/539))
- Fixed bug where `device` property was not properly updated when metric was a child of a module ([#542](https://github.com/Lightning-AI/metrics/pull/542))
## [0.5.1] - 2021-08-30
### Added
- Added `device` and `dtype` properties ([#462](https://github.com/Lightning-AI/metrics/pull/462))
- Added `TextTester` class for robustly testing text metrics ([#450](https://github.com/Lightning-AI/metrics/pull/450))
### Changed
- Added support for float targets in `nDCG` metric ([#437](https://github.com/Lightning-AI/metrics/pull/437))
### Removed
- Removed `rouge-score` as dependency for text package ([#443](https://github.com/Lightning-AI/metrics/pull/443))
- Removed `jiwer` as dependency for text package ([#446](https://github.com/Lightning-AI/metrics/pull/446))
- Removed `bert-score` as dependency for text package ([#473](https://github.com/Lightning-AI/metrics/pull/473))
### Fixed
- Fixed ranking of samples in `SpearmanCorrCoef` metric ([#448](https://github.com/Lightning-AI/metrics/pull/448))
- Fixed bug where compositional metrics were unable to sync because of type mismatch ([#454](https://github.com/Lightning-AI/metrics/pull/454))
- Fixed metric hashing ([#478](https://github.com/Lightning-AI/metrics/pull/478))
- Fixed `BootStrapper` metrics not working on GPU ([#462](https://github.com/Lightning-AI/metrics/pull/462))
- Fixed the semantic ordering of kernel height and width in `SSIM` metric ([#474](https://github.com/Lightning-AI/metrics/pull/474))
## [0.5.0] - 2021-08-09
### Added
- Added **Text-related (NLP) metrics**:
- Word Error Rate (WER) ([#383](https://github.com/Lightning-AI/metrics/pull/383))
- ROUGE ([#399](https://github.com/Lightning-AI/metrics/pull/399))
- BERT score ([#424](https://github.com/Lightning-AI/metrics/pull/424))
- BLEU score ([#360](https://github.com/Lightning-AI/metrics/pull/360))
- Added `MetricTracker` wrapper metric for keeping track of the same metric over multiple epochs ([#238](https://github.com/Lightning-AI/metrics/pull/238))
- Added other metrics:
- Symmetric Mean Absolute Percentage error (SMAPE) ([#375](https://github.com/Lightning-AI/metrics/pull/375))
- Calibration error ([#394](https://github.com/Lightning-AI/metrics/pull/394))
- Permutation Invariant Training (PIT) ([#384](https://github.com/Lightning-AI/metrics/pull/384))
- Added support in `nDCG` metric for target with values larger than 1 ([#349](https://github.com/Lightning-AI/metrics/pull/349))
- Added support for negative targets in `nDCG` metric ([#378](https://github.com/Lightning-AI/metrics/pull/378))
- Added `None` as reduction option in `CosineSimilarity` metric ([#400](https://github.com/Lightning-AI/metrics/pull/400))
- Allowed passing labels in (n_samples, n_classes) to `AveragePrecision` ([#386](https://github.com/Lightning-AI/metrics/pull/386))
### Changed
- Moved `psnr` and `ssim` from `functional.regression.*` to `functional.image.*` ([#382](https://github.com/Lightning-AI/metrics/pull/382))
- Moved `image_gradient` from `functional.image_gradients` to `functional.image.gradients` ([#381](https://github.com/Lightning-AI/metrics/pull/381))
- Moved `R2Score` from `regression.r2score` to `regression.r2` ([#371](https://github.com/Lightning-AI/metrics/pull/371))
- Pearson metric now only store 6 statistics instead of all predictions and targets ([#380](https://github.com/Lightning-AI/metrics/pull/380))
- Use `torch.argmax` instead of `torch.topk` when `k=1` for better performance ([#419](https://github.com/Lightning-AI/metrics/pull/419))
- Moved check for number of samples in R2 score to support single sample updating ([#426](https://github.com/Lightning-AI/metrics/pull/426))
### Deprecated
- Renamed `r2score` >> `r2_score` and `kldivergence` >> `kl_divergence` in `functional` ([#371](https://github.com/Lightning-AI/metrics/pull/371))
- Moved `bleu_score` from `functional.nlp` to `functional.text.bleu` ([#360](https://github.com/Lightning-AI/metrics/pull/360))
### Removed
- Removed restriction that `threshold` has to be in (0,1) range to support logit input (
[#351](https://github.com/Lightning-AI/metrics/pull/351),
[#401](https://github.com/Lightning-AI/metrics/pull/401))
- Removed restriction that `preds` could not be bigger than `num_classes` to support logit input ([#357](https://github.com/Lightning-AI/metrics/pull/357))
- Removed modules `regression.psnr` and `regression.ssim` ([#382](https://github.com/Lightning-AI/metrics/pull/382))
- Removed ([#379](https://github.com/Lightning-AI/metrics/pull/379)):
* function `functional.mean_relative_error`
* `num_thresholds` argument in `BinnedPrecisionRecallCurve`
### Fixed
- Fixed bug where classification metrics with `average='macro'` would lead to wrong result if a class was missing ([#303](https://github.com/Lightning-AI/metrics/pull/303))
- Fixed `weighted`, `multi-class` AUROC computation to allow for 0 observations of some class, as contribution to final AUROC is 0 ([#376](https://github.com/Lightning-AI/metrics/pull/376))
- Fixed that `_forward_cache` and `_computed` attributes are also moved to the correct device if metric is moved ([#413](https://github.com/Lightning-AI/metrics/pull/413))
- Fixed calculation in `IoU` metric when using `ignore_index` argument ([#328](https://github.com/Lightning-AI/metrics/pull/328))
## [0.4.1] - 2021-07-05
### Changed
- Extend typing ([#330](https://github.com/Lightning-AI/metrics/pull/330),
[#332](https://github.com/Lightning-AI/metrics/pull/332),
[#333](https://github.com/Lightning-AI/metrics/pull/333),
[#335](https://github.com/Lightning-AI/metrics/pull/335),
[#314](https://github.com/Lightning-AI/metrics/pull/314))
### Fixed
- Fixed DDP by adding `is_sync` logic to `Metric` ([#339](https://github.com/Lightning-AI/metrics/pull/339))
## [0.4.0] - 2021-06-29
### Added
- Added **Image-related metrics**:
- Fréchet inception distance (FID) ([#213](https://github.com/Lightning-AI/metrics/pull/213))
- Kernel Inception Distance (KID) ([#301](https://github.com/Lightning-AI/metrics/pull/301))
- Inception Score ([#299](https://github.com/Lightning-AI/metrics/pull/299))
- KL divergence ([#247](https://github.com/Lightning-AI/metrics/pull/247))
- Added **Audio metrics**: SNR, SI_SDR, SI_SNR ([#292](https://github.com/Lightning-AI/metrics/pull/292))
- Added other metrics:
- Cosine Similarity ([#305](https://github.com/Lightning-AI/metrics/pull/305))
- Specificity ([#210](https://github.com/Lightning-AI/metrics/pull/210))
- Mean Absolute Percentage error (MAPE) ([#248](https://github.com/Lightning-AI/metrics/pull/248))
- Added `add_metrics` method to `MetricCollection` for adding additional metrics after initialization ([#221](https://github.com/Lightning-AI/metrics/pull/221))
- Added pre-gather reduction in the case of `dist_reduce_fx="cat"` to reduce communication cost ([#217](https://github.com/Lightning-AI/metrics/pull/217))
- Added better error message for `AUROC` when `num_classes` is not provided for multiclass input ([#244](https://github.com/Lightning-AI/metrics/pull/244))
- Added support for unnormalized scores (e.g. logits) in `Accuracy`, `Precision`, `Recall`, `FBeta`, `F1`, `StatScore`, `Hamming`, `ConfusionMatrix` metrics ([#200](https://github.com/Lightning-AI/metrics/pull/200))
- Added `squared` argument to `MeanSquaredError` for computing `RMSE` (see the sketch after this list) ([#249](https://github.com/Lightning-AI/metrics/pull/249))
- Added `is_differentiable` property to `ConfusionMatrix`, `F1`, `FBeta`, `Hamming`, `Hinge`, `IOU`, `MatthewsCorrcoef`, `Precision`, `Recall`, `PrecisionRecallCurve`, `ROC`, `StatScores` ([#253](https://github.com/Lightning-AI/metrics/pull/253))
- Added `sync` and `sync_context` methods for manually controlling when metric states are synced ([#302](https://github.com/Lightning-AI/metrics/pull/302))
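A minimal sketch of the `squared` argument referenced above:

```python
import torch
from torchmetrics import MeanSquaredError

# squared=False turns the metric into RMSE
rmse = MeanSquaredError(squared=False)
preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
target = torch.tensor([3.0, -0.5, 2.0, 7.0])
print(rmse(preds, target))  # square root of the mean squared error
```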
### Changed
- Forward cache is reset when `reset` method is called ([#260](https://github.com/Lightning-AI/metrics/pull/260))
- Improved per-class metric handling for imbalanced datasets for `precision`, `recall`, `precision_recall`, `fbeta`, `f1`, `accuracy`, and `specificity` ([#204](https://github.com/Lightning-AI/metrics/pull/204))
- Decorated `torch.jit.unused` to `MetricCollection` forward ([#307](https://github.com/Lightning-AI/metrics/pull/307))
- Renamed `thresholds` argument to binned metrics for manually controlling the thresholds ([#322](https://github.com/Lightning-AI/metrics/pull/322))
- Extend typing ([#324](https://github.com/Lightning-AI/metrics/pull/324),
[#326](https://github.com/Lightning-AI/metrics/pull/326),
[#327](https://github.com/Lightning-AI/metrics/pull/327))
### Deprecated
- Deprecated `functional.mean_relative_error`, use `functional.mean_absolute_percentage_error` ([#248](https://github.com/Lightning-AI/metrics/pull/248))
- Deprecated `num_thresholds` argument in `BinnedPrecisionRecallCurve` ([#322](https://github.com/Lightning-AI/metrics/pull/322))
### Removed
- Removed argument `is_multiclass` ([#319](https://github.com/Lightning-AI/metrics/pull/319))
### Fixed
- AUC can also support more dimensional inputs when all but one dimension are of size 1 ([#242](https://github.com/Lightning-AI/metrics/pull/242))
- Fixed `dtype` of modular metrics after reset has been called ([#243](https://github.com/Lightning-AI/metrics/pull/243))
- Fixed calculation in `matthews_corrcoef` to correctly match formula ([#321](https://github.com/Lightning-AI/metrics/pull/321))
## [0.3.2] - 2021-05-10
### Added
- Added `is_differentiable` property:
* To `AUC`, `AUROC`, `CohenKappa` and `AveragePrecision` ([#178](https://github.com/Lightning-AI/metrics/pull/178))
* To `PearsonCorrCoef`, `SpearmanCorrcoef`, `R2Score` and `ExplainedVariance` ([#225](https://github.com/Lightning-AI/metrics/pull/225))
### Changed
- `MetricCollection` should return metrics with prefix on `items()`, `keys()` ([#209](https://github.com/Lightning-AI/metrics/pull/209))
- Calling `compute` before `update` will now give warning ([#164](https://github.com/Lightning-AI/metrics/pull/164))
### Removed
- Removed `numpy` as direct dependency ([#212](https://github.com/Lightning-AI/metrics/pull/212))
### Fixed
- Fixed auc calculation and add tests ([#197](https://github.com/Lightning-AI/metrics/pull/197))
- Fixed loading persisted metric states using `load_state_dict()` ([#202](https://github.com/Lightning-AI/metrics/pull/202))
- Fixed `PSNR` not working with `DDP` ([#214](https://github.com/Lightning-AI/metrics/pull/214))
- Fixed metric calculation with unequal batch sizes ([#220](https://github.com/Lightning-AI/metrics/pull/220))
- Fixed metric concatenation for list states for zero-dim input ([#229](https://github.com/Lightning-AI/metrics/pull/229))
- Fixed numerical instability in `AUROC` metric for large input ([#230](https://github.com/Lightning-AI/metrics/pull/230))
## [0.3.1] - 2021-04-21
- Cleaned remaining inconsistencies and fixed PL develop integration (
[#191](https://github.com/Lightning-AI/metrics/pull/191),
[#192](https://github.com/Lightning-AI/metrics/pull/192),
[#193](https://github.com/Lightning-AI/metrics/pull/193),
[#194](https://github.com/Lightning-AI/metrics/pull/194)
)
## [0.3.0] - 2021-04-20
### Added
- Added `BootStrapper` to easily calculate confidence intervals for metrics ([#101](https://github.com/Lightning-AI/metrics/pull/101))
- Added Binned metrics ([#128](https://github.com/Lightning-AI/metrics/pull/128))
- Added metrics for Information Retrieval ([PL^5032](https://github.com/Lightning-AI/lightning/pull/5032)):
* `RetrievalMAP` ([PL^5032](https://github.com/Lightning-AI/lightning/pull/5032))
* `RetrievalMRR` ([#119](https://github.com/Lightning-AI/metrics/pull/119))
* `RetrievalPrecision` ([#139](https://github.com/Lightning-AI/metrics/pull/139))
* `RetrievalRecall` ([#146](https://github.com/Lightning-AI/metrics/pull/146))
* `RetrievalNormalizedDCG` ([#160](https://github.com/Lightning-AI/metrics/pull/160))
* `RetrievalFallOut` ([#161](https://github.com/Lightning-AI/metrics/pull/161))
- Added other metrics:
* `CohenKappa` ([#69](https://github.com/Lightning-AI/metrics/pull/69))
* `MatthewsCorrcoef` ([#98](https://github.com/Lightning-AI/metrics/pull/98))
* `PearsonCorrcoef` ([#157](https://github.com/Lightning-AI/metrics/pull/157))
* `SpearmanCorrcoef` ([#158](https://github.com/Lightning-AI/metrics/pull/158))
* `Hinge` ([#120](https://github.com/Lightning-AI/metrics/pull/120))
- Added `average='micro'` as an option in AUROC for multilabel problems ([#110](https://github.com/Lightning-AI/metrics/pull/110))
- Added multilabel support to `ROC` metric ([#114](https://github.com/Lightning-AI/metrics/pull/114))
- Added testing for `half` precision ([#77](https://github.com/Lightning-AI/metrics/pull/77),
[#135](https://github.com/Lightning-AI/metrics/pull/135)
)
- Added `AverageMeter` for ad-hoc averages of values ([#138](https://github.com/Lightning-AI/metrics/pull/138))
- Added `prefix` argument to `MetricCollection` ([#70](https://github.com/Lightning-AI/metrics/pull/70))
- Added `__getitem__` as metric arithmetic operation ([#142](https://github.com/Lightning-AI/metrics/pull/142))
- Added property `is_differentiable` to metrics and test for differentiability ([#154](https://github.com/Lightning-AI/metrics/pull/154))
- Added support for `average`, `ignore_index` and `mdmc_average` in `Accuracy` metric ([#166](https://github.com/Lightning-AI/metrics/pull/166))
- Added `postfix` arg to `MetricCollection` ([#188](https://github.com/Lightning-AI/metrics/pull/188))
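A minimal sketch of the `prefix` argument (the `postfix` argument behaves analogously), shown with present-day class names:

```python
import torch
from torchmetrics import MetricCollection
from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision

collection = MetricCollection(
    [MulticlassAccuracy(num_classes=3), MulticlassPrecision(num_classes=3)],
    prefix="val_",
)
preds = torch.randn(10, 3).softmax(dim=-1)
target = torch.randint(3, (10,))
print(collection(preds, target))  # keys such as 'val_MulticlassAccuracy'
```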
### Changed
- Changed `ExplainedVariance` from storing all preds/targets to tracking 5 statistics ([#68](https://github.com/Lightning-AI/metrics/pull/68))
- Changed behaviour of `confusionmatrix` for multilabel data to better match `multilabel_confusion_matrix` from sklearn ([#134](https://github.com/Lightning-AI/metrics/pull/134))
- Updated FBeta arguments ([#111](https://github.com/Lightning-AI/metrics/pull/111))
- Changed `reset` method to use `detach.clone()` instead of `deepcopy` when resetting to default ([#163](https://github.com/Lightning-AI/metrics/pull/163))
- Metrics passed as dict to `MetricCollection` will now always be in deterministic order ([#173](https://github.com/Lightning-AI/metrics/pull/173))
- Allowed passing metrics as arguments to `MetricCollection` ([#176](https://github.com/Lightning-AI/metrics/pull/176))
### Deprecated
- Rename argument `is_multiclass` -> `multiclass` ([#162](https://github.com/Lightning-AI/metrics/pull/162))
### Removed
- Pruned remaining deprecated code ([#92](https://github.com/Lightning-AI/metrics/pull/92))
### Fixed
- Fixed `_stable_1d_sort` to work when `n >= N` ([PL^6177](https://github.com/Lightning-AI/lightning/pull/6177))
- Fixed `_computed` attribute not being correctly reset ([#147](https://github.com/Lightning-AI/metrics/pull/147))
- Fixed BLEU score ([#165](https://github.com/Lightning-AI/metrics/pull/165))
- Fixed backwards compatibility for logging with older version of pytorch-lightning ([#182](https://github.com/Lightning-AI/metrics/pull/182))
## [0.2.0] - 2021-03-12
### Changed
- Decoupled PL dependency ([#13](https://github.com/Lightning-AI/metrics/pull/13))
- Refactored functional - mimic the module-like structure: classification, regression, etc. ([#16](https://github.com/Lightning-AI/metrics/pull/16))
- Refactored utilities - split to topics/submodules ([#14](https://github.com/Lightning-AI/metrics/pull/14))
- Refactored `MetricCollection` ([#19](https://github.com/Lightning-AI/metrics/pull/19))
### Removed
- Removed deprecated metrics from PL base ([#12](https://github.com/Lightning-AI/metrics/pull/12),
[#15](https://github.com/Lightning-AI/metrics/pull/15))
## [0.1.0] - 2021-02-22
- Added `Accuracy` metric now generalizes to Top-k accuracy for (multi-dimensional) multi-class inputs using the `top_k` parameter ([PL^4838](https://github.com/Lightning-AI/lightning/pull/4838))
- Added `Accuracy` metric now enables the computation of subset accuracy for multi-label or multi-dimensional multi-class inputs with the `subset_accuracy` parameter ([PL^4838](https://github.com/Lightning-AI/lightning/pull/4838))
- Added `HammingDistance` metric to compute the hamming distance (loss) ([PL^4838](https://github.com/Lightning-AI/lightning/pull/4838))
- Added `StatScores` metric to compute the number of true positives, false positives, true negatives and false negatives ([PL^4839](https://github.com/Lightning-AI/lightning/pull/4839))
- Added `R2Score` metric ([PL^5241](https://github.com/Lightning-AI/lightning/pull/5241))
- Added `MetricCollection` ([PL^4318](https://github.com/Lightning-AI/lightning/pull/4318))
- Added `.clone()` method to metrics ([PL^4318](https://github.com/Lightning-AI/lightning/pull/4318))
- Added `IoU` class interface ([PL^4704](https://github.com/Lightning-AI/lightning/pull/4704))
- The `Recall` and `Precision` metrics (and their functional counterparts `recall` and `precision`) can now be generalized to Recall@K and Precision@K with the use of `top_k` parameter ([PL^4842](https://github.com/Lightning-AI/lightning/pull/4842))
- Added compositional metrics ([PL^5464](https://github.com/Lightning-AI/lightning/pull/5464))
- Added AUC/AUROC class interface ([PL^5479](https://github.com/Lightning-AI/lightning/pull/5479))
- Added `QuantizationAwareTraining` callback ([PL^5706](https://github.com/Lightning-AI/lightning/pull/5706))
- Added `ConfusionMatrix` class interface ([PL^4348](https://github.com/Lightning-AI/lightning/pull/4348))
- Added multiclass AUROC metric ([PL^4236](https://github.com/Lightning-AI/lightning/pull/4236))
- Added `PrecisionRecallCurve, ROC, AveragePrecision` class metric ([PL^4549](https://github.com/Lightning-AI/lightning/pull/4549))
- Classification metrics overhaul ([PL^4837](https://github.com/Lightning-AI/lightning/pull/4837))
- Added `F1` class metric ([PL^4656](https://github.com/Lightning-AI/lightning/pull/4656))
- Added metrics aggregation in Horovod and fixed early stopping ([PL^3775](https://github.com/Lightning-AI/lightning/pull/3775))
- Added `persistent(mode)` method to metrics, to enable and disable metric states being added to `state_dict` ([PL^4482](https://github.com/Lightning-AI/lightning/pull/4482))
- Added unification of regression metrics ([PL^4166](https://github.com/Lightning-AI/lightning/pull/4166))
- Added persistent flag to `Metric.add_state` ([PL^4195](https://github.com/Lightning-AI/lightning/pull/4195))
- Added classification metrics ([PL^4043](https://github.com/Lightning-AI/lightning/pull/4043))
- Added new Metrics API. ([PL^3868](https://github.com/Lightning-AI/lightning/pull/3868), [PL^3921](https://github.com/Lightning-AI/lightning/pull/3921))
- Added EMB similarity ([PL^3349](https://github.com/Lightning-AI/lightning/pull/3349))
- Added SSIM metrics ([PL^2671](https://github.com/Lightning-AI/lightning/pull/2671))
- Added BLEU metrics ([PL^2535](https://github.com/Lightning-AI/lightning/pull/2535))
public_repos/torchmetrics | public_repos/torchmetrics/requirements/audio.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
# this need to be the same as used inside speechmetrics
pesq @ git+https://github.com/ludlows/python-pesq
pystoi >=0.3.0, <=0.3.3
torchaudio >=0.10.0
gammatone @ https://github.com/detly/gammatone/archive/master.zip#egg=Gammatone
public_repos/torchmetrics | public_repos/torchmetrics/requirements/nominal_test.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
pandas >1.0.0, <=2.0.3 # cannot pin version due to numpy version incompatibility
dython <=0.7.4
scipy >1.0.0, <1.11.0 # cannot pin version due to some version conflicts with `oldest` CI configuration
statsmodels >0.13.5, <=0.14.0
public_repos/torchmetrics | public_repos/torchmetrics/requirements/_docs.txt | sphinx ==5.3.0
myst-parser ==1.0.0
nbsphinx ==0.9.3
pandoc ==2.3
docutils ==0.19
sphinxcontrib-fulltoc >=1.0
sphinxcontrib-mockautodoc
lai-sphinx-theme # needs to be downloaded from s3://sphinx-packages/
sphinx-autodoc-typehints ==1.23.0
sphinx-paramlinks ==0.6.0
sphinx-togglebutton ==0.3.2
sphinx-copybutton ==0.5.2
lightning >=1.8.0, <2.2.0
lightning-utilities >=0.9.0, <0.10.0
pydantic > 1.0.0, < 3.0.0
# integrations
-r _integrate.txt
-r visual.txt
-r audio.txt
-r detection.txt
-r image.txt
-r multimodal.txt
-r text.txt
-r text_test.txt
public_repos/torchmetrics | public_repos/torchmetrics/requirements/text_test.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
jiwer >=2.3.0, <3.1.0
rouge-score >0.1.0, <=0.1.2
bert_score ==0.3.13
huggingface-hub <0.19 # hotfix, failing SDR for latest PT 1.11
sacrebleu >=2.3.0, <2.4.0
public_repos/torchmetrics | public_repos/torchmetrics/requirements/_integrate.txt | # continuously validated integration with these expected ranges
# ToDo: investigate and add validation with 2.0+ on GPU
pytorch-lightning >=1.9.0, <2.0.0
public_repos/torchmetrics | public_repos/torchmetrics/requirements/classification_test.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
pandas >=1.4.0, <=2.0.3
netcal >1.0.0, <=1.3.5 # calibration_error
numpy <1.25.0
fairlearn # group_fairness
public_repos/torchmetrics | public_repos/torchmetrics/requirements/audio_test.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
pypesq @ git+https://github.com/vBaiCai/python-pesq
mir-eval >=0.6, <=0.7
speechmetrics @ git+https://github.com/aliutkus/speechmetrics
fast-bss-eval >=0.1.0, <0.1.5
torch_complex <=0.4.3 # needed for fast-bss-eval
srmrpy @ git+https://github.com/jfsantos/SRMRpy
public_repos/torchmetrics | public_repos/torchmetrics/requirements/detection_test.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
faster-coco-eval >=1.3.3
public_repos/torchmetrics | public_repos/torchmetrics/requirements/_devel.txt | # use mandatory dependencies
-r base.txt
# add the testing dependencies
-r _tests.txt
# add extra requirements
-r image.txt
-r text.txt
-r detection.txt
-r audio.txt
-r multimodal.txt
-r visual.txt
# add extra testing
-r image_test.txt
-r text_test.txt
-r audio_test.txt
-r detection_test.txt
-r classification_test.txt
-r nominal_test.txt
public_repos/torchmetrics | public_repos/torchmetrics/requirements/image.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
scipy >1.0.0, <1.11.0
torchvision >=0.8, <0.17.0
torch-fidelity <=0.4.0 # bumping to allow install version from master, now used in testing
lpips <=0.1.4
public_repos/torchmetrics | public_repos/torchmetrics/requirements/text.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
nltk >=3.6, <=3.8.1
tqdm >=4.41.0, <=4.66.1
regex >=2021.9.24, <=2023.10.3
transformers >4.4.0, <4.34.2
mecab-python3 >=1.0.6, <1.1.0
mecab-ko >=1.0.0, <1.1.0
mecab-ko-dic >=1.0.0, <1.1.0
ipadic >=1.0.0, <1.1.0
sentencepiece >=0.1.98, <=0.1.99
public_repos/torchmetrics | public_repos/torchmetrics/requirements/_tests.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
coverage ==7.3.2
pytest ==7.4.3
pytest-cov ==4.1.0
pytest-doctestplus ==1.0.0
pytest-rerunfailures ==12.0
pytest-timeout ==2.2.0
phmdoctest ==1.4.0
psutil <5.10.0
requests <=2.31.0
fire <=0.5.0
cloudpickle >1.3, <=3.0.0
scikit-learn >=1.1.1, <1.4.0
public_repos/torchmetrics | public_repos/torchmetrics/requirements/image_test.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
scikit-image >=0.19.0, <=0.21.0
kornia >=0.6.7, <0.7.1
pytorch-msssim ==1.0.0
sewar >=0.4.4, <=0.4.6
numpy <1.25.0
torch-fidelity @ git+https://github.com/toshas/torch-fidelity@master
public_repos/torchmetrics | public_repos/torchmetrics/requirements/_doctest.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
pytest >=6.0.0, <7.5.0
pytest-doctestplus >=0.9.0, <=1.0.0
pytest-rerunfailures >=10.0, <13.0
public_repos/torchmetrics | public_repos/torchmetrics/requirements/visual.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
matplotlib >=3.2.0, <3.8.0
SciencePlots >= 2.0.0, <= 2.1.0
public_repos/torchmetrics | public_repos/torchmetrics/requirements/README.md | # Project Requirements
This folder contains all requirements files for the project. The base requirements are located in the `base.txt` file.
Files prefixed with `_` are only meant for development and testing purposes. In general, each subdomain of the project
has a `<domain>.txt` file that contains the necessary requirements for using that subdomain and a `<domain>_test.txt`
file that contains the necessary requirements for testing that subdomain.
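If you only need to work on a single subdomain, you can also install just its two requirement files; for example, for the audio metrics:
```bash
pip install -r requirements/audio.txt -r requirements/audio_test.txt
```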
To install all extra requirements such that all tests can be run, use the following command:
```bash
pip install -r requirements/_devel.txt # unittests
pip install -r requirements/_integrate.txt # integration tests
```
To install all extra requirements so that the documentation can be built, use the following command:
```bash
pip install -r requirements/_docs.txt
# OR just run `make docs`
```
## CI/CD upper bounds automation
For CI stability, we set an upper bound (the latest known compatible version) on every package version, so a sudden
new release cannot break our development. Dependabot manages the continuous updates of these upper bounds.
Note that these upper bounds are lifted when installing this package from source or as a package.
If you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment.
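For example, a hypothetical pin in one of the domain files that should never be relaxed would look like this (`some-package` is purely illustrative, not an actual dependency of the project):
```
# the in-line "strict" comment keeps the upper bound even when the package is installed, not just in CI
some-package >=1.0.0, <2.0.0 # strict
```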
public_repos/torchmetrics | public_repos/torchmetrics/requirements/typing.txt | mypy ==1.6.1
torch ==2.1.0
types-PyYAML
types-emoji
types-protobuf
types-requests
types-setuptools
types-six
types-tabulate
types-protobuf
public_repos/torchmetrics | public_repos/torchmetrics/requirements/detection.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
torchvision >=0.8, <0.17.0
pycocotools >2.0.0, <=2.0.7
public_repos/torchmetrics | public_repos/torchmetrics/requirements/multimodal.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
transformers >=4.10.0, <4.34.2
piq <=0.8.0
public_repos/torchmetrics | public_repos/torchmetrics/requirements/base.txt | # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
numpy >1.20.0
packaging >17.1
torch >=1.10.0, <=2.1.0
typing-extensions; python_version < '3.9'
lightning-utilities >=0.8.0, <0.10.0
public_repos/torchmetrics | public_repos/torchmetrics/dockers/README.md | # Docker images
## Build images from Dockerfiles
You can build the images on your own; note that this takes quite a long time, so be prepared.
```bash
git clone https://github.com/Lightning-AI/torchmetrics.git
# build with the default arguments
docker image build -t torchmetrics:latest -f dockers/ubuntu-cuda/Dockerfile .
# build with specific arguments
docker image build -t torchmetrics:ubuntu-cuda11.7.1-py3.9-torch1.13 \
-f dockers/base-cuda/Dockerfile \
--build-arg PYTHON_VERSION=3.9 \
--build-arg PYTORCH_VERSION=1.13 \
--build-arg CUDA_VERSION=11.7.1 \
.
```
To run your docker image, use
```bash
docker image list
docker run --rm -it torchmetrics:latest bash
```
and if you do not need it anymore, just remove it:
```bash
docker image list
docker image rm torchmetrics:latest
```
## Run docker image with GPUs
To run a docker image with access to your GPUs, you need to install the NVIDIA Container Toolkit:
```bash
# Add the package repositories
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
sudo systemctl restart docker
```
and later run the docker image with `--gpus all`. For example,
```bash
docker run --rm -it --gpus all torchmetrics:ubuntu-cuda11.7.1-py3.9-torch1.13
```
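Assuming you built the `torchmetrics:latest` image as shown above, a quick way to verify that the GPUs are actually visible inside the container is:
```bash
# should print `True` when the NVIDIA runtime and drivers are set up correctly
docker run --rm --gpus all torchmetrics:latest python -c "import torch; print(torch.cuda.is_available())"
```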
public_repos/torchmetrics/dockers | public_repos/torchmetrics/dockers/ubuntu-cuda/Dockerfile | # Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG UBUNTU_VERSION=22.04
ARG CUDA_VERSION=11.7.1
FROM nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
ARG PYTHON_VERSION=3.10
ARG PYTORCH_VERSION=2.0
SHELL ["/bin/bash", "-c"]
# https://techoverflow.net/2019/05/18/how-to-fix-configuring-tzdata-interactive-input-when-building-docker-images/
ENV \
DEBIAN_FRONTEND="noninteractive" \
TZ="Etc/UTC" \
PATH="$PATH:/root/.local/bin" \
CUDA_TOOLKIT_ROOT_DIR="/usr/local/cuda" \
MKL_THREADING_LAYER="GNU" \
# MAKEFLAGS="-j$(nproc)"
MAKEFLAGS="-j2"
RUN \
apt-get -y update --fix-missing && \
apt-get install -y --no-install-recommends --allow-downgrades --allow-change-held-packages \
build-essential \
pkg-config \
cmake \
git \
wget \
curl \
unzip \
g++ \
ffmpeg \
libsndfile1 \
ca-certificates \
software-properties-common \
libopenmpi-dev \
openmpi-bin \
ssh \
&& \
# Install python
add-apt-repository ppa:deadsnakes/ppa && \
apt-get install -y \
python${PYTHON_VERSION} \
python${PYTHON_VERSION}-distutils \
python${PYTHON_VERSION}-dev \
&& \
update-alternatives --install /usr/bin/python${PYTHON_VERSION%%.*} python${PYTHON_VERSION%%.*} /usr/bin/python${PYTHON_VERSION} 1 && \
update-alternatives --install /usr/bin/python python /usr/bin/python${PYTHON_VERSION} 1 && \
curl https://bootstrap.pypa.io/get-pip.py | python && \
# Cleaning
apt-get autoremove -y && \
apt-get clean && \
rm -rf /root/.cache && \
rm -rf /var/lib/apt/lists/*
ENV PYTHONPATH="/usr/lib/python${PYTHON_VERSION}/site-packages"
COPY requirements/ requirements/
RUN \
# set particular PyTorch version
pip install -q wget packaging && \
python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py && \
for fpath in `ls requirements/*.txt`; do \
python ./adjust-torch-versions.py $fpath ${PYTORCH_VERSION}; \
done && \
# trying to resolve pesq installation issue
pip install -q "numpy<1.24" && \
CUDA_VERSION_MM=${CUDA_VERSION%.*} && \
CU_VERSION_MM=${CUDA_VERSION_MM//'.'/''} && \
pip install --no-cache-dir -r requirements/_devel.txt \
--find-links "https://download.pytorch.org/whl/cu${CU_VERSION_MM}/torch_stable.html" && \
rm -rf requirements/
RUN \
# Show what we have
pip --version && \
pip list && \
python -c "import sys; ver = sys.version_info ; assert f'{ver.major}.{ver.minor}' == '$PYTHON_VERSION', ver" && \
python -c "import torch; assert torch.__version__.startswith('$PYTORCH_VERSION'), torch.__version__"
public_repos/torchmetrics/src | public_repos/torchmetrics/src/torchmetrics/__about__.py | __version__ = "1.3.0dev"
__author__ = "Lightning-AI et al."
__author_email__ = "name@pytorchlightning.ai"
__license__ = "Apache-2.0"
__copyright__ = f"Copyright (c) 2020-2023, {__author__}."
__homepage__ = "https://github.com/Lightning-AI/torchmetrics"
__docs__ = "PyTorch native Metrics"
__docs_url__ = "https://lightning.ai/docs/torchmetrics/stable/"
__long_doc__ = """
Torchmetrics is a metrics API created for easy metric development and usage in both PyTorch and
[PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of
Pytorch Lightning, but got split off so users could take advantage of the large collection of metrics
implemented without having to install Pytorch Lightning (even though we would love for you to try it out).
We currently have more than 100 metrics implemented and we are continuously adding more, both within
already covered domains (classification, regression, etc.) and in new domains (object detection, etc.).
We make sure that all our metrics are rigorously tested such that you can trust them.
"""
__all__ = [
"__author__",
"__author_email__",
"__copyright__",
"__docs__",
"__docs_url__",
"__homepage__",
"__license__",
"__version__",
]
public_repos/torchmetrics/src | public_repos/torchmetrics/src/torchmetrics/aggregation.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
from torchmetrics.wrappers.running import Running
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SumMetric.plot", "MeanMetric.plot", "MaxMetric.plot", "MinMetric.plot"]
class BaseAggregator(Metric):
"""Base class for aggregation metrics.
Args:
fn: string specifying the reduction function
default_value: default tensor value to use for the metric state
nan_strategy: options:
- ``'error'``: if any `nan` values are encountered will give a RuntimeError
- ``'warn'``: if any `nan` values are encountered will give a warning and continue
- ``'ignore'``: all `nan` values are silently removed
- a float: if a float is provided will impute any `nan` values with this value
state_name: name of the metric state
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
"""
is_differentiable = None
higher_is_better = None
full_state_update: bool = False
def __init__(
self,
fn: Union[Callable, str],
default_value: Union[Tensor, List],
nan_strategy: Union[str, float] = "error",
state_name: str = "value",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
allowed_nan_strategy = ("error", "warn", "ignore")
if nan_strategy not in allowed_nan_strategy and not isinstance(nan_strategy, float):
raise ValueError(
f"Arg `nan_strategy` should either be a float or one of {allowed_nan_strategy}"
f" but got {nan_strategy}."
)
self.nan_strategy = nan_strategy
self.add_state(state_name, default=default_value, dist_reduce_fx=fn)
self.state_name = state_name
def _cast_and_nan_check_input(
self, x: Union[float, Tensor], weight: Optional[Union[float, Tensor]] = None
) -> Tuple[Tensor, Tensor]:
"""Convert input ``x`` to a tensor and check for Nans."""
if not isinstance(x, Tensor):
x = torch.as_tensor(x, dtype=torch.float32, device=self.device)
if weight is not None and not isinstance(weight, Tensor):
weight = torch.as_tensor(weight, dtype=torch.float32, device=self.device)
nans = torch.isnan(x)
if weight is not None:
nans_weight = torch.isnan(weight)
else:
nans_weight = torch.zeros_like(nans).bool()
weight = torch.ones_like(x)
if nans.any() or nans_weight.any():
if self.nan_strategy == "error":
raise RuntimeError("Encountered `nan` values in tensor")
if self.nan_strategy in ("ignore", "warn"):
if self.nan_strategy == "warn":
rank_zero_warn("Encountered `nan` values in tensor. Will be removed.", UserWarning)
x = x[~(nans | nans_weight)]
weight = weight[~(nans | nans_weight)]
else:
if not isinstance(self.nan_strategy, float):
raise ValueError(f"`nan_strategy` shall be float but you pass {self.nan_strategy}")
x[nans | nans_weight] = self.nan_strategy
weight[nans | nans_weight] = self.nan_strategy
return x.float(), weight.float()
def update(self, value: Union[float, Tensor]) -> None:
"""Overwrite in child class."""
def compute(self) -> Tensor:
"""Compute the aggregated value."""
return getattr(self, self.state_name)
class MaxMetric(BaseAggregator):
"""Aggregate a stream of value into their maximum value.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
arbitrary shape ``(...,)``.
As output of `forward` and `compute` the metric returns the following output
- ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated maximum value over all inputs received
Args:
nan_strategy: options:
- ``'error'``: if any `nan` values are encountered will give a RuntimeError
- ``'warn'``: if any `nan` values are encountered will give a warning and continue
- ``'ignore'``: all `nan` values are silently removed
- a float: if a float is provided will impute any `nan` values with this value
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torch import tensor
>>> from torchmetrics.aggregation import MaxMetric
>>> metric = MaxMetric()
>>> metric.update(1)
>>> metric.update(tensor([2, 3]))
>>> metric.compute()
tensor(3.)
"""
full_state_update: bool = True
max_value: Tensor
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
**kwargs: Any,
) -> None:
super().__init__(
"max",
-torch.tensor(float("inf")),
nan_strategy,
state_name="max_value",
**kwargs,
)
def update(self, value: Union[float, Tensor]) -> None:
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
"""
value, _ = self._cast_and_nan_check_input(value)
if value.numel(): # make sure tensor not empty
self.max_value = torch.max(self.max_value, torch.max(value))
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.aggregation import MaxMetric
>>> metric = MaxMetric()
>>> metric.update([1, 2, 3])
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.aggregation import MaxMetric
>>> metric = MaxMetric()
>>> values = [ ]
>>> for i in range(10):
... values.append(metric(i))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MinMetric(BaseAggregator):
"""Aggregate a stream of value into their minimum value.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
arbitrary shape ``(...,)``.
As output of `forward` and `compute` the metric returns the following output
- ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated minimum value over all inputs received
Args:
nan_strategy: options:
- ``'error'``: if any `nan` values are encountered will give a RuntimeError
- ``'warn'``: if any `nan` values are encountered will give a warning and continue
- ``'ignore'``: all `nan` values are silently removed
- a float: if a float is provided will impute any `nan` values with this value
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torch import tensor
>>> from torchmetrics.aggregation import MinMetric
>>> metric = MinMetric()
>>> metric.update(1)
>>> metric.update(tensor([2, 3]))
>>> metric.compute()
tensor(1.)
"""
full_state_update: bool = True
min_value: Tensor
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
**kwargs: Any,
) -> None:
super().__init__(
"min",
torch.tensor(float("inf")),
nan_strategy,
state_name="min_value",
**kwargs,
)
def update(self, value: Union[float, Tensor]) -> None:
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
"""
value, _ = self._cast_and_nan_check_input(value)
if value.numel(): # make sure tensor not empty
self.min_value = torch.min(self.min_value, torch.min(value))
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.aggregation import MinMetric
>>> metric = MinMetric()
>>> metric.update([1, 2, 3])
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.aggregation import MinMetric
>>> metric = MinMetric()
>>> values = [ ]
>>> for i in range(10):
... values.append(metric(i))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class SumMetric(BaseAggregator):
"""Aggregate a stream of value into their sum.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
arbitrary shape ``(...,)``.
As output of `forward` and `compute` the metric returns the following output
- ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated sum over all inputs received
Args:
nan_strategy: options:
- ``'error'``: if any `nan` values are encountered will give a RuntimeError
- ``'warn'``: if any `nan` values are encountered will give a warning and continue
- ``'ignore'``: all `nan` values are silently removed
- a float: if a float is provided will impute any `nan` values with this value
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torch import tensor
>>> from torchmetrics.aggregation import SumMetric
>>> metric = SumMetric()
>>> metric.update(1)
>>> metric.update(tensor([2, 3]))
>>> metric.compute()
tensor(6.)
"""
sum_value: Tensor
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
**kwargs: Any,
) -> None:
super().__init__(
"sum",
torch.tensor(0.0),
nan_strategy,
state_name="sum_value",
**kwargs,
)
def update(self, value: Union[float, Tensor]) -> None:
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
"""
value, _ = self._cast_and_nan_check_input(value)
if value.numel():
self.sum_value += value.sum()
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.aggregation import SumMetric
>>> metric = SumMetric()
>>> metric.update([1, 2, 3])
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import rand, randint
>>> from torchmetrics.aggregation import SumMetric
>>> metric = SumMetric()
>>> values = [ ]
>>> for i in range(10):
... values.append(metric([i, i+1]))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class CatMetric(BaseAggregator):
"""Concatenate a stream of values.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
arbitrary shape ``(...,)``.
As output of `forward` and `compute` the metric returns the following output
- ``agg`` (:class:`~torch.Tensor`): scalar float tensor with concatenated values over all input received
Args:
nan_strategy: options:
- ``'error'``: if any `nan` values are encountered will give a RuntimeError
- ``'warn'``: if any `nan` values are encountered will give a warning and continue
- ``'ignore'``: all `nan` values are silently removed
- a float: if a float is provided will impute any `nan` values with this value
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torch import tensor
>>> from torchmetrics.aggregation import CatMetric
>>> metric = CatMetric()
>>> metric.update(1)
>>> metric.update(tensor([2, 3]))
>>> metric.compute()
tensor([1., 2., 3.])
"""
value: Tensor
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
**kwargs: Any,
) -> None:
super().__init__("cat", [], nan_strategy, **kwargs)
def update(self, value: Union[float, Tensor]) -> None:
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
"""
value, _ = self._cast_and_nan_check_input(value)
if value.numel():
self.value.append(value)
def compute(self) -> Tensor:
"""Compute the aggregated value."""
if isinstance(self.value, list) and self.value:
return dim_zero_cat(self.value)
return self.value
class MeanMetric(BaseAggregator):
"""Aggregate a stream of value into their mean value.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
arbitrary shape ``(...,)``.
- ``weight`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
arbitrary shape ``(...,)``. Needs to be broadcastable with the shape of ``value`` tensor.
As output of `forward` and `compute` the metric returns the following output
- ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated (weighted) mean over all inputs received
Args:
nan_strategy: options:
- ``'error'``: if any `nan` values are encountered will give a RuntimeError
- ``'warn'``: if any `nan` values are encountered will give a warning and continue
- ``'ignore'``: all `nan` values are silently removed
- a float: if a float is provided will impute any `nan` values with this value
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torch import tensor
>>> from torchmetrics.aggregation import MeanMetric
>>> metric = MeanMetric()
>>> metric.update(1)
>>> metric.update(tensor([2, 3]))
>>> metric.compute()
tensor(2.)
"""
mean_value: Tensor
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
**kwargs: Any,
) -> None:
super().__init__(
"sum",
torch.tensor(0.0),
nan_strategy,
state_name="mean_value",
**kwargs,
)
self.add_state("weight", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None:
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
weight: Either a float or tensor containing weights for calculating
the average. Shape of weight should be able to broadcast with
the shape of `value`. Defaults to `1.0`, corresponding to a simple
(unweighted) average.
"""
# broadcast weight to value shape
if not isinstance(value, Tensor):
value = torch.as_tensor(value, dtype=torch.float32, device=self.device)
if weight is not None and not isinstance(weight, Tensor):
weight = torch.as_tensor(weight, dtype=torch.float32, device=self.device)
weight = torch.broadcast_to(weight, value.shape)
value, weight = self._cast_and_nan_check_input(value, weight)
if value.numel() == 0:
return
self.mean_value += (value * weight).sum()
self.weight += weight.sum()
def compute(self) -> Tensor:
"""Compute the aggregated value."""
return self.mean_value / self.weight
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.aggregation import MeanMetric
>>> metric = MeanMetric()
>>> metric.update([1, 2, 3])
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.aggregation import MeanMetric
>>> metric = MeanMetric()
>>> values = [ ]
>>> for i in range(10):
... values.append(metric([i, i+1]))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class RunningMean(Running):
"""Aggregate a stream of value into their mean over a running window.
Using this metric compared to `MeanMetric` allows for calculating metrics over a running window of values, instead
of the whole history of values. This is beneficial when you want to get a better estimate of the metric during
training and don't want to wait for the whole training to finish to get epoch level estimates.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
arbitrary shape ``(...,)``.
As output of `forward` and `compute` the metric returns the following output
- ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated sum over all inputs received
Args:
window: The size of the running window.
nan_strategy: options:
- ``'error'``: if any `nan` values are encountered will give a RuntimeError
- ``'warn'``: if any `nan` values are encountered will give a warning and continue
- ``'ignore'``: all `nan` values are silently removed
- a float: if a float is provided will impute any `nan` values with this value
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torch import tensor
>>> from torchmetrics.aggregation import RunningMean
>>> metric = RunningMean(window=3)
>>> for i in range(6):
... current_val = metric(tensor([i]))
... running_val = metric.compute()
... total_val = tensor(sum(list(range(i+1)))) / (i+1) # total mean over all samples
... print(f"{current_val=}, {running_val=}, {total_val=}")
current_val=tensor(0.), running_val=tensor(0.), total_val=tensor(0.)
current_val=tensor(1.), running_val=tensor(0.5000), total_val=tensor(0.5000)
current_val=tensor(2.), running_val=tensor(1.), total_val=tensor(1.)
current_val=tensor(3.), running_val=tensor(2.), total_val=tensor(1.5000)
current_val=tensor(4.), running_val=tensor(3.), total_val=tensor(2.)
current_val=tensor(5.), running_val=tensor(4.), total_val=tensor(2.5000)
"""
def __init__(
self,
window: int = 5,
nan_strategy: Union[str, float] = "warn",
**kwargs: Any,
) -> None:
super().__init__(base_metric=MeanMetric(nan_strategy=nan_strategy, **kwargs), window=window)
class RunningSum(Running):
"""Aggregate a stream of value into their sum over a running window.
Using this metric compared to `SumMetric` allows for calculating metrics over a running window of values, instead
of the whole history of values. This is beneficial when you want to get a better estimate of the metric during
training and don't want to wait for the whole training to finish to get epoch level estimates.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
arbitrary shape ``(...,)``.
As output of `forward` and `compute` the metric returns the following output
- ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated sum over all inputs received
Args:
window: The size of the running window.
nan_strategy: options:
- ``'error'``: if any `nan` values are encountered will give a RuntimeError
- ``'warn'``: if any `nan` values are encountered will give a warning and continue
- ``'ignore'``: all `nan` values are silently removed
- a float: if a float is provided will impute any `nan` values with this value
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torch import tensor
>>> from torchmetrics.aggregation import RunningSum
>>> metric = RunningSum(window=3)
>>> for i in range(6):
... current_val = metric(tensor([i]))
... running_val = metric.compute()
... total_val = tensor(sum(list(range(i+1)))) # total sum over all samples
... print(f"{current_val=}, {running_val=}, {total_val=}")
current_val=tensor(0.), running_val=tensor(0.), total_val=tensor(0)
current_val=tensor(1.), running_val=tensor(1.), total_val=tensor(1)
current_val=tensor(2.), running_val=tensor(3.), total_val=tensor(3)
current_val=tensor(3.), running_val=tensor(6.), total_val=tensor(6)
current_val=tensor(4.), running_val=tensor(9.), total_val=tensor(10)
current_val=tensor(5.), running_val=tensor(12.), total_val=tensor(15)
"""
def __init__(
self,
window: int = 5,
nan_strategy: Union[str, float] = "warn",
**kwargs: Any,
) -> None:
super().__init__(base_metric=SumMetric(nan_strategy=nan_strategy, **kwargs), window=window)
public_repos/torchmetrics/src | public_repos/torchmetrics/src/torchmetrics/collections.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this is just a bypass for this module name collision with built-in one
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Dict, Hashable, Iterable, Iterator, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import ModuleDict
from typing_extensions import Literal
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import _flatten_dict, allclose
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE, plot_single_or_multi_val
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MetricCollection.plot", "MetricCollection.plot_all"]
class MetricCollection(ModuleDict):
"""MetricCollection class can be used to chain metrics that have the same call pattern into one single class.
Args:
metrics: One of the following
* list or tuple (sequence): if metrics are passed in as a list or tuple, will use the metrics class name
as key for output dict. Therefore, two metrics of the same class cannot be chained this way.
* arguments: similar to passing in as a list, metrics passed in as arguments will use their metric
class name as key for the output dict.
* dict: if metrics are passed in as a dict, will use each key in the dict as key for output dict.
Use this format if you want to chain together multiple of the same metric with different parameters.
Note that the keys in the output dict will be sorted alphabetically.
prefix: a string to append in front of the keys of the output dict
postfix: a string to append after the keys of the output dict
compute_groups:
By default the MetricCollection will try to reduce the computations needed for the metrics in the collection
by checking if they belong to the same **compute group**. All metrics in a compute group share the same
metric state and are therefore only different in their compute step e.g. accuracy, precision and recall
can all be computed from the true positives/negatives and false positives/negatives. By default,
this argument is ``True`` which enables this feature. Set this argument to `False` for disabling
this behaviour. Can also be set to a list of lists of metrics for setting the compute groups yourself.
.. note::
The compute groups feature can significantly speedup the calculation of metrics under the right conditions.
First, the feature is only available when calling the ``update`` method and not when calling the ``forward`` method
due to the internal logic of ``forward`` preventing this. Secondly, since compute groups share metric
states by reference, calling ``.items()``, ``.values()`` etc. on the metric collection will break this
reference and a copy of states are instead returned in this case (reference will be reestablished on the next
call to ``update``).
.. note::
Metric collections can be nested at initialization (see last example) but the output of the collection will
still be a single flattened dictionary combining the prefix and postfix arguments from the nested collection.
Raises:
ValueError:
If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``.
ValueError:
If two elements in ``metrics`` have the same ``name``.
ValueError:
If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``.
ValueError:
If ``metrics`` is ``dict`` and additional_metrics are passed in.
ValueError:
If ``prefix`` is set and it is not a string.
ValueError:
If ``postfix`` is set and it is not a string.
Example::
In the most basic case, the metrics can be passed in as a list or tuple. The keys of the output dict will be
the same as the class name of the metric:
>>> from torch import tensor
>>> from pprint import pprint
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.regression import MeanSquaredError
>>> from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision, MulticlassRecall
>>> target = tensor([0, 2, 0, 2, 0, 1, 0, 2])
>>> preds = tensor([2, 1, 2, 0, 1, 2, 2, 2])
>>> metrics = MetricCollection([MulticlassAccuracy(num_classes=3, average='micro'),
... MulticlassPrecision(num_classes=3, average='macro'),
... MulticlassRecall(num_classes=3, average='macro')])
>>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE
{'MulticlassAccuracy': tensor(0.1250),
'MulticlassPrecision': tensor(0.0667),
'MulticlassRecall': tensor(0.1111)}
Example::
Alternatively, metrics can be passed in as arguments. The keys of the output dict will be the same as the
class name of the metric:
>>> metrics = MetricCollection(MulticlassAccuracy(num_classes=3, average='micro'),
... MulticlassPrecision(num_classes=3, average='macro'),
... MulticlassRecall(num_classes=3, average='macro'))
>>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE
{'MulticlassAccuracy': tensor(0.1250),
'MulticlassPrecision': tensor(0.0667),
'MulticlassRecall': tensor(0.1111)}
Example::
If multiple of the same metric class (with different parameters) should be chained together, metrics can be
passed in as a dict and the output dict will have the same keys as the input dict:
>>> metrics = MetricCollection({'micro_recall': MulticlassRecall(num_classes=3, average='micro'),
... 'macro_recall': MulticlassRecall(num_classes=3, average='macro')})
>>> same_metric = metrics.clone()
>>> pprint(metrics(preds, target))
{'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}
>>> pprint(same_metric(preds, target))
{'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}
Example::
Metric collections can also be nested one level deep. The output of the collection will still be a single
dict with the prefix and postfix arguments from the nested collection:
>>> metrics = MetricCollection([
... MetricCollection([
... MulticlassAccuracy(num_classes=3, average='macro'),
... MulticlassPrecision(num_classes=3, average='macro')
... ], postfix='_macro'),
... MetricCollection([
... MulticlassAccuracy(num_classes=3, average='micro'),
... MulticlassPrecision(num_classes=3, average='micro')
... ], postfix='_micro'),
... ], prefix='valmetrics/')
>>> pprint(metrics(preds, target)) # doctest: +NORMALIZE_WHITESPACE
{'valmetrics/MulticlassAccuracy_macro': tensor(0.1111),
'valmetrics/MulticlassAccuracy_micro': tensor(0.1250),
'valmetrics/MulticlassPrecision_macro': tensor(0.0667),
'valmetrics/MulticlassPrecision_micro': tensor(0.1250)}
Example::
The `compute_groups` argument allow you to specify which metrics should share metric state. By default, this
will automatically be derived but can also be set manually.
>>> metrics = MetricCollection(
... MulticlassRecall(num_classes=3, average='macro'),
... MulticlassPrecision(num_classes=3, average='macro'),
... MeanSquaredError(),
... compute_groups=[['MulticlassRecall', 'MulticlassPrecision'], ['MeanSquaredError']]
... )
>>> metrics.update(preds, target)
>>> pprint(metrics.compute())
{'MeanSquaredError': tensor(2.3750), 'MulticlassPrecision': tensor(0.0667), 'MulticlassRecall': tensor(0.1111)}
>>> pprint(metrics.compute_groups)
{0: ['MulticlassRecall', 'MulticlassPrecision'], 1: ['MeanSquaredError']}
"""
_modules: Dict[str, Metric] # type: ignore[assignment]
_groups: Dict[int, List[str]]
def __init__(
self,
metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]],
*additional_metrics: Metric,
prefix: Optional[str] = None,
postfix: Optional[str] = None,
compute_groups: Union[bool, List[List[str]]] = True,
) -> None:
super().__init__()
self.prefix = self._check_arg(prefix, "prefix")
self.postfix = self._check_arg(postfix, "postfix")
self._enable_compute_groups = compute_groups
self._groups_checked: bool = False
self._state_is_copy: bool = False
self.add_metrics(metrics, *additional_metrics)
@torch.jit.unused
def forward(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
"""Call forward for each metric sequentially.
Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
will be filtered based on the signature of the individual metric.
"""
return self._compute_and_reduce("forward", *args, **kwargs)
def update(self, *args: Any, **kwargs: Any) -> None:
"""Call update for each metric sequentially.
Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
will be filtered based on the signature of the individual metric.
"""
# Use compute groups if already initialized and checked
if self._groups_checked:
for cg in self._groups.values():
# only update the first member
m0 = getattr(self, cg[0])
m0.update(*args, **m0._filter_kwargs(**kwargs))
if self._state_is_copy:
# If we have deep copied state in between updates, reestablish link
self._compute_groups_create_state_ref()
self._state_is_copy = False
else: # the first update always do per metric to form compute groups
for m in self.values(copy_state=False):
m_kwargs = m._filter_kwargs(**kwargs)
m.update(*args, **m_kwargs)
if self._enable_compute_groups:
self._merge_compute_groups()
# create reference between states
self._compute_groups_create_state_ref()
self._groups_checked = True
def _merge_compute_groups(self) -> None:
"""Iterate over the collection of metrics, checking if the state of each metric matches another.
If so, their compute groups will be merged into one. The complexity of the method is approximately
``O(number_of_metrics_in_collection ** 2)``, as all metrics need to be compared to all other metrics.
"""
num_groups = len(self._groups)
while True:
for cg_idx1, cg_members1 in deepcopy(self._groups).items():
for cg_idx2, cg_members2 in deepcopy(self._groups).items():
if cg_idx1 == cg_idx2:
continue
metric1 = getattr(self, cg_members1[0])
metric2 = getattr(self, cg_members2[0])
if self._equal_metric_states(metric1, metric2):
self._groups[cg_idx1].extend(self._groups.pop(cg_idx2))
break
# Start over if we merged groups
if len(self._groups) != num_groups:
break
# Stop when we iterate over everything and do not merge any groups
if len(self._groups) == num_groups:
break
num_groups = len(self._groups)
# Re-index groups
temp = deepcopy(self._groups)
self._groups = {}
for idx, values in enumerate(temp.values()):
self._groups[idx] = values
@staticmethod
def _equal_metric_states(metric1: Metric, metric2: Metric) -> bool:
"""Check if the metric state of two metrics are the same."""
# empty state
if len(metric1._defaults) == 0 or len(metric2._defaults) == 0:
return False
if metric1._defaults.keys() != metric2._defaults.keys():
return False
for key in metric1._defaults:
state1 = getattr(metric1, key)
state2 = getattr(metric2, key)
if type(state1) != type(state2):
return False
if isinstance(state1, Tensor) and isinstance(state2, Tensor):
return state1.shape == state2.shape and allclose(state1, state2)
if isinstance(state1, list) and isinstance(state2, list):
return all(s1.shape == s2.shape and allclose(s1, s2) for s1, s2 in zip(state1, state2))
return True
def _compute_groups_create_state_ref(self, copy: bool = False) -> None:
"""Create reference between metrics in the same compute group.
Args:
copy: If `True` the metric state between members will be copied instead
of just passed by reference
"""
if not self._state_is_copy:
for cg in self._groups.values():
m0 = getattr(self, cg[0])
for i in range(1, len(cg)):
mi = getattr(self, cg[i])
for state in m0._defaults:
m0_state = getattr(m0, state)
# Determine if we just should set a reference or a full copy
setattr(mi, state, deepcopy(m0_state) if copy else m0_state)
mi._update_count = deepcopy(m0._update_count) if copy else m0._update_count
self._state_is_copy = copy
def compute(self) -> Dict[str, Any]:
"""Compute the result for each metric in the collection."""
return self._compute_and_reduce("compute")
def _compute_and_reduce(
self, method_name: Literal["compute", "forward"], *args: Any, **kwargs: Any
) -> Dict[str, Any]:
"""Compute result from collection and reduce into a single dictionary.
Args:
method_name: The method to call on each metric in the collection.
Should be either `compute` or `forward`.
args: Positional arguments to pass to each metric (if method_name is `forward`)
kwargs: Keyword arguments to pass to each metric (if method_name is `forward`)
Raises:
ValueError:
If method_name is not `compute` or `forward`.
"""
result = {}
for k, m in self.items(keep_base=True, copy_state=False):
if method_name == "compute":
res = m.compute()
elif method_name == "forward":
res = m(*args, **m._filter_kwargs(**kwargs))
else:
raise ValueError("method_name should be either 'compute' or 'forward', but got {method_name}")
result[k] = res
_, duplicates = _flatten_dict(result)
flattened_results = {}
for k, m in self.items(keep_base=True, copy_state=False):
res = result[k]
if isinstance(res, dict):
for key, v in res.items():
# if duplicates of keys we need to add unique prefix to each key
if duplicates:
stripped_k = k.replace(getattr(m, "prefix", ""), "")
stripped_k = stripped_k.replace(getattr(m, "postfix", ""), "")
key = f"{stripped_k}_{key}"
if getattr(m, "_from_collection", None) and m.prefix is not None:
key = f"{m.prefix}{key}"
if getattr(m, "_from_collection", None) and m.postfix is not None:
key = f"{key}{m.postfix}"
flattened_results[key] = v
else:
flattened_results[k] = res
return {self._set_name(k): v for k, v in flattened_results.items()}
def reset(self) -> None:
"""Call reset for each metric sequentially."""
for m in self.values(copy_state=False):
m.reset()
if self._enable_compute_groups and self._groups_checked:
# reset state reference
self._compute_groups_create_state_ref()
def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> "MetricCollection":
"""Make a copy of the metric collection.
Args:
prefix: a string to append in front of the metric keys
postfix: a string to append after the keys of the output dict.
"""
mc = deepcopy(self)
if prefix:
mc.prefix = self._check_arg(prefix, "prefix")
if postfix:
mc.postfix = self._check_arg(postfix, "postfix")
return mc
def persistent(self, mode: bool = True) -> None:
"""Change if metric states should be saved to its state_dict after initialization."""
for m in self.values(copy_state=False):
m.persistent(mode)
def add_metrics(
self, metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], *additional_metrics: Metric
) -> None:
"""Add new metrics to Metric Collection."""
if isinstance(metrics, Metric):
# set compatible with original type expectations
metrics = [metrics]
if isinstance(metrics, Sequence):
# prepare for optional additions
metrics = list(metrics)
remain: list = []
for m in additional_metrics:
sel = metrics if isinstance(m, Metric) else remain
sel.append(m)
if remain:
rank_zero_warn(
f"You have passes extra arguments {remain} which are not `Metric` so they will be ignored."
)
elif additional_metrics:
raise ValueError(
f"You have passes extra arguments {additional_metrics} which are not compatible"
f" with first passed dictionary {metrics} so they will be ignored."
)
if isinstance(metrics, dict):
# Check all values are metrics
# Make sure that metrics are added in deterministic order
for name in sorted(metrics.keys()):
metric = metrics[name]
if not isinstance(metric, (Metric, MetricCollection)):
raise ValueError(
f"Value {metric} belonging to key {name} is not an instance of"
" `torchmetrics.Metric` or `torchmetrics.MetricCollection`"
)
if isinstance(metric, Metric):
self[name] = metric
else:
for k, v in metric.items(keep_base=False):
v.postfix = metric.postfix
v.prefix = metric.prefix
v._from_collection = True
self[f"{name}_{k}"] = v
elif isinstance(metrics, Sequence):
for metric in metrics:
if not isinstance(metric, (Metric, MetricCollection)):
raise ValueError(
f"Input {metric} to `MetricCollection` is not a instance of"
" `torchmetrics.Metric` or `torchmetrics.MetricCollection`"
)
if isinstance(metric, Metric):
name = metric.__class__.__name__
if name in self:
raise ValueError(f"Encountered two metrics both named {name}")
self[name] = metric
else:
for k, v in metric.items(keep_base=False):
v.postfix = metric.postfix
v.prefix = metric.prefix
v._from_collection = True
self[k] = v
else:
raise ValueError(
"Unknown input to MetricCollection. Expected, `Metric`, `MetricCollection` or `dict`/`sequence` of the"
f" previous, but got {metrics}"
)
self._groups_checked = False
if self._enable_compute_groups:
self._init_compute_groups()
else:
self._groups = {}
def _init_compute_groups(self) -> None:
"""Initialize compute groups.
If the user provided a list, we check that all metrics in the list are also in the collection. If set to `True`, we
simply initialize each metric in the collection as its own group.
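A sketch of the list format this refers to (the metric names must match ``self.keys(keep_base=True)``):
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.classification import BinaryAccuracy, BinaryPrecision, BinaryRecall
>>> mc = MetricCollection(
...     [BinaryAccuracy(), BinaryPrecision(), BinaryRecall()],
...     compute_groups=[["BinaryPrecision", "BinaryRecall"], ["BinaryAccuracy"]],
... )
>>> mc.compute_groups
{0: ['BinaryPrecision', 'BinaryRecall'], 1: ['BinaryAccuracy']}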
"""
if isinstance(self._enable_compute_groups, list):
self._groups = dict(enumerate(self._enable_compute_groups))
for v in self._groups.values():
for metric in v:
if metric not in self:
raise ValueError(
f"Input {metric} in `compute_groups` argument does not match a metric in the collection."
f" Please make sure that {self._enable_compute_groups} matches {self.keys(keep_base=True)}"
)
self._groups_checked = True
else:
# Initialize all metrics as their own compute group
self._groups = {i: [str(k)] for i, k in enumerate(self.keys(keep_base=True))}
@property
def compute_groups(self) -> Dict[int, List[str]]:
"""Return a dict with the current compute groups in the collection."""
return self._groups
def _set_name(self, base: str) -> str:
"""Adjust name of metric with both prefix and postfix."""
name = base if self.prefix is None else self.prefix + base
return name if self.postfix is None else name + self.postfix
def _to_renamed_ordered_dict(self) -> OrderedDict:
od = OrderedDict()
for k, v in self._modules.items():
od[self._set_name(k)] = v
return od
def __iter__(self) -> Iterator[Hashable]:
"""Return an iterator over the keys of the MetricDict."""
return iter(self.keys())
# TODO: redefine this as native python dict
def keys(self, keep_base: bool = False) -> Iterable[Hashable]:
r"""Return an iterable of the ModuleDict key.
Args:
keep_base: Whether to add prefix/postfix on the items collection.
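Example (illustrative sketch of the effect of ``keep_base`` when a prefix is set):
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.classification import BinaryAccuracy, BinaryRecall
>>> mc = MetricCollection([BinaryAccuracy(), BinaryRecall()], prefix="train_")
>>> list(mc.keys())
['train_BinaryAccuracy', 'train_BinaryRecall']
>>> list(mc.keys(keep_base=True))
['BinaryAccuracy', 'BinaryRecall']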
"""
if keep_base:
return self._modules.keys()
return self._to_renamed_ordered_dict().keys()
def items(self, keep_base: bool = False, copy_state: bool = True) -> Iterable[Tuple[str, Metric]]:
r"""Return an iterable of the ModuleDict key/value pairs.
Args:
keep_base: Whether to return the keys without any prefix/postfix (i.e. the base metric names).
copy_state:
If metric states should be copied between metrics in the same compute group or just passed by reference
"""
self._compute_groups_create_state_ref(copy_state)
if keep_base:
return self._modules.items()
return self._to_renamed_ordered_dict().items()
def values(self, copy_state: bool = True) -> Iterable[Metric]:
"""Return an iterable of the ModuleDict values.
Args:
copy_state:
If metric states should be copied between metrics in the same compute group or just passed by reference
"""
self._compute_groups_create_state_ref(copy_state)
return self._modules.values()
def __getitem__(self, key: str, copy_state: bool = True) -> Metric:
"""Retrieve a single metric from the collection.
Args:
key: name of metric to retrieve
copy_state:
If metric states should be copied between metrics in the same compute group or just passed by reference
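Example (a small sketch; note that lookup always uses the base key, i.e. without any prefix/postfix):
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.classification import BinaryAccuracy
>>> mc = MetricCollection([BinaryAccuracy()], prefix="train_")
>>> binary_acc = mc["BinaryAccuracy"]  # not "train_BinaryAccuracy"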
"""
self._compute_groups_create_state_ref(copy_state)
return self._modules[key]
@staticmethod
def _check_arg(arg: Optional[str], name: str) -> Optional[str]:
if arg is None or isinstance(arg, str):
return arg
raise ValueError(f"Expected input `{name}` to be a string, but got {type(arg)}")
def __repr__(self) -> str:
"""Return the representation of the metric collection including all metrics in the collection."""
repr_str = super().__repr__()[:-2]
if self.prefix:
repr_str += f",\n prefix={self.prefix}{',' if self.postfix else ''}"
if self.postfix:
repr_str += f"{',' if not self.prefix else ''}\n postfix={self.postfix}"
return repr_str + "\n)"
def set_dtype(self, dst_type: Union[str, torch.dtype]) -> "MetricCollection":
"""Transfer all metric state to specific dtype. Special version of standard `type` method.
Arguments:
dst_type: the desired type as ``torch.dtype`` or string.
"""
for m in self.values(copy_state=False):
m.set_dtype(dst_type)
return self
def plot(
self,
val: Optional[Union[Dict, Sequence[Dict]]] = None,
ax: Optional[Union[_AX_TYPE, Sequence[_AX_TYPE]]] = None,
together: bool = False,
) -> Sequence[_PLOT_OUT_TYPE]:
"""Plot a single or multiple values from the metric.
The plot method has two modes of operation. If argument `together` is set to `False` (default), the `.plot`
method of each metric will be called individually and the result will be list of figures. If `together` is set
to `True`, the values of all metrics will instead be plotted in the same figure.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: Either a single instance of matplotlib axis object or a sequence of matplotlib axis objects. If
provided, will add the plots to the provided axis objects. If not provided, will create new axes. If
argument `together` is set to `True`, a single object is expected. If `together` is set to `False`,
the number of axis objects needs to be the same length as the number of metrics in the collection.
together: If `True`, will plot all metrics in the same axis. If `False`, will plot each metric in a separate axis.
Returns:
Either a single tuple of Figure and Axes objects or a sequence of such tuples, one for each
metric in the collection.
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
ValueError:
If `together` is not a bool
ValueError:
If `ax` is not an instance of matplotlib axis object or a sequence of matplotlib axis objects
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.classification import BinaryAccuracy, BinaryPrecision, BinaryRecall
>>> metrics = MetricCollection([BinaryAccuracy(), BinaryPrecision(), BinaryRecall()])
>>> metrics.update(torch.rand(10), torch.randint(2, (10,)))
>>> fig_ax_ = metrics.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.classification import BinaryAccuracy, BinaryPrecision, BinaryRecall
>>> metrics = MetricCollection([BinaryAccuracy(), BinaryPrecision(), BinaryRecall()])
>>> values = []
>>> for _ in range(10):
... values.append(metrics(torch.rand(10), torch.randint(2, (10,))))
>>> fig_, ax_ = metrics.plot(values, together=True)
"""
if not isinstance(together, bool):
raise ValueError(f"Expected argument `together` to be a boolean, but got {type(together)}")
if ax is not None:
if together and not isinstance(ax, _AX_TYPE):
raise ValueError(
f"Expected argument `ax` to be a matplotlib axis object, but got {type(ax)} when `together=True`"
)
if not together and not (
isinstance(ax, Sequence) and all(isinstance(a, _AX_TYPE) for a in ax) and len(ax) == len(self)
):
raise ValueError(
f"Expected argument `ax` to be a sequence of matplotlib axis objects with the same length as the "
f"number of metrics in the collection, but got {type(ax)} with len {len(ax)} when `together=False`"
)
val = val or self.compute()
if together:
return plot_single_or_multi_val(val, ax=ax)
fig_axs = []
for i, (k, m) in enumerate(self.items(keep_base=True, copy_state=False)):
if isinstance(val, dict):
f, a = m.plot(val[k], ax=ax[i] if ax is not None else ax)
elif isinstance(val, Sequence):
f, a = m.plot([v[k] for v in val], ax=ax[i] if ax is not None else ax)
fig_axs.append((f, a))
return fig_axs
public_repos/torchmetrics/src | public_repos/torchmetrics/src/torchmetrics/metric.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# `builtins` is imported to distinguish between the built-in `float` type and the `Metric.float` method
# defined below, which would otherwise shadow it in the type annotations.
import builtins
import functools
import inspect
from abc import ABC, abstractmethod
from contextlib import contextmanager
from copy import deepcopy
from typing import Any, Callable, ClassVar, Dict, Generator, List, Optional, Sequence, Tuple, Union
import torch
from lightning_utilities import apply_to_collection
from torch import Tensor
from torch.nn import Module
from torchmetrics.utilities.data import (
_flatten,
_squeeze_if_scalar,
dim_zero_cat,
dim_zero_max,
dim_zero_mean,
dim_zero_min,
dim_zero_sum,
)
from torchmetrics.utilities.distributed import gather_all_tensors
from torchmetrics.utilities.exceptions import TorchMetricsUserError
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE, plot_single_or_multi_val
from torchmetrics.utilities.prints import rank_zero_warn
def jit_distributed_available() -> bool:
"""Determine if distributed mode is initialized."""
return torch.distributed.is_available() and torch.distributed.is_initialized()
class Metric(Module, ABC):
"""Base class for all metrics present in the Metrics API.
This class is inherited by all metrics and implements the following functionality:
1. Handles the transfer of metric states to correct device
2. Handles the synchronization of metric states across processes
The three core methods of the base class are
* ``add_state()``
* ``forward()``
* ``reset()``
which should almost never be overwritten by child classes. Instead, the following methods should be overwritten
* ``update()``
* ``compute()``
Args:
kwargs: additional keyword arguments, see :ref:`Metric kwargs` for more info.
- compute_on_cpu: If metric state should be stored on CPU during computations. Only works for list states.
- dist_sync_on_step: If metric state should synchronize on ``forward()``. Default is ``False``
- process_group: The process group on which the synchronization is called. Default is the world.
- dist_sync_fn: Function that performs the allgather operation on the metric state. Default is a custom
implementation that calls ``torch.distributed.all_gather`` internally.
- distributed_available_fn: Function that checks if the distributed backend is available. Defaults to a
check of ``torch.distributed.is_available()`` and ``torch.distributed.is_initialized()``.
- sync_on_compute: If metric state should synchronize when ``compute`` is called. Default is ``True``
- compute_with_cache: If results from ``compute`` should be cached. Default is ``True``
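Example of passing a few of these kwargs (a sketch; ``BinaryAccuracy`` is only used for illustration):
>>> from torchmetrics.classification import BinaryAccuracy
>>> metric = BinaryAccuracy(sync_on_compute=False, compute_with_cache=False)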
"""
__jit_ignored_attributes__: ClassVar[List[str]] = ["device"]
__jit_unused_properties__: ClassVar[List[str]] = [
"is_differentiable",
"higher_is_better",
"plot_lower_bound",
"plot_upper_bound",
"plot_legend_name",
"metric_state",
"_update_called",
]
is_differentiable: Optional[bool] = None
higher_is_better: Optional[bool] = None
full_state_update: Optional[bool] = None
plot_lower_bound: Optional[float] = None
plot_upper_bound: Optional[float] = None
plot_legend_name: Optional[str] = None
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__()
# see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/
# torch/nn/modules/module.py#L227)
torch._C._log_api_usage_once(f"torchmetrics.metric.{self.__class__.__name__}")
self._device = torch.device("cpu")
self.compute_on_cpu = kwargs.pop("compute_on_cpu", False)
if not isinstance(self.compute_on_cpu, bool):
raise ValueError(
f"Expected keyword argument `compute_on_cpu` to be an `bool` but got {self.compute_on_cpu}"
)
self.dist_sync_on_step = kwargs.pop("dist_sync_on_step", False)
if not isinstance(self.dist_sync_on_step, bool):
raise ValueError(
f"Expected keyword argument `dist_sync_on_step` to be an `bool` but got {self.dist_sync_on_step}"
)
self.process_group = kwargs.pop("process_group", None)
self.dist_sync_fn = kwargs.pop("dist_sync_fn", None)
if self.dist_sync_fn is not None and not callable(self.dist_sync_fn):
raise ValueError(
f"Expected keyword argument `dist_sync_fn` to be an callable function but got {self.dist_sync_fn}"
)
self.distributed_available_fn = kwargs.pop("distributed_available_fn", None) or jit_distributed_available
self.sync_on_compute = kwargs.pop("sync_on_compute", True)
if not isinstance(self.sync_on_compute, bool):
raise ValueError(
f"Expected keyword argument `sync_on_compute` to be a `bool` but got {self.sync_on_compute}"
)
self.compute_with_cache = kwargs.pop("compute_with_cache", True)
if not isinstance(self.compute_with_cache, bool):
raise ValueError(
f"Expected keyword argument `compute_with_cache` to be a `bool` but got {self.compute_with_cache}"
)
if kwargs:
kwargs_ = [f"`{a}`" for a in sorted(kwargs)]
raise ValueError(f"Unexpected keyword arguments: {', '.join(kwargs_)}")
# initialize
self._update_signature = inspect.signature(self.update)
self.update: Callable = self._wrap_update(self.update) # type: ignore[method-assign]
self.compute: Callable = self._wrap_compute(self.compute) # type: ignore[method-assign]
self._computed = None
self._forward_cache = None
self._update_count = 0
self._to_sync = self.sync_on_compute
self._should_unsync = True
self._enable_grad = False
self._dtype_convert = False
# initialize state
self._defaults: Dict[str, Union[List, Tensor]] = {}
self._persistent: Dict[str, bool] = {}
self._reductions: Dict[str, Union[str, Callable[..., Any], None]] = {}
# state management
self._is_synced = False
self._cache: Optional[Dict[str, Union[List[Tensor], Tensor]]] = None
@property
def _update_called(self) -> bool:
rank_zero_warn(
"This property will be removed in 2.0.0. Use `Metric.updated_called` instead.",
DeprecationWarning,
stacklevel=2,
)
return self.update_called
@property
def update_called(self) -> bool:
"""Returns `True` if `update` or `forward` has been called initialization or last `reset`."""
return self._update_count > 0
@property
def update_count(self) -> int:
"""Get the number of times `update` and/or `forward` has been called since initialization or last `reset`."""
return self._update_count
@property
def metric_state(self) -> Dict[str, Union[List[Tensor], Tensor]]:
"""Get the current state of the metric."""
return {attr: getattr(self, attr) for attr in self._defaults}
def add_state(
self,
name: str,
default: Union[list, Tensor],
dist_reduce_fx: Optional[Union[str, Callable]] = None,
persistent: bool = False,
) -> None:
"""Add metric state variable. Only used by subclasses.
Metric state variables are either :class:`~torch.Tensor` or an empty list, which can be appended to by the
metric. Each state variable must have a unique name associated with it. State variables are accessible as
attributes of the metric, i.e., if ``name`` is ``"my_state"`` then its value can be accessed from an instance
``metric`` as ``metric.my_state``. Metric states behave like buffers and parameters of :class:`~torch.nn.Module`
as they are also updated when ``.to()`` is called. Unlike parameters and buffers, metric states are not by
default saved in the modules :attr:`~torch.nn.Module.state_dict`.
Args:
name: The name of the state variable. The variable will then be accessible at ``self.name``.
default: Default value of the state; can either be a :class:`~torch.Tensor` or an empty list.
The state will be reset to this value when ``self.reset()`` is called.
dist_reduce_fx (Optional): Function to reduce state across multiple processes in distributed mode.
If value is ``"sum"``, ``"mean"``, ``"cat"``, ``"min"`` or ``"max"`` we will use ``torch.sum``,
``torch.mean``, ``torch.cat``, ``torch.min`` and ``torch.max`` respectively, each with argument
``dim=0``. Note that the ``"cat"`` reduction only makes sense if the state is a list, and not
a tensor. The user can also pass a custom function in this parameter.
persistent (Optional): whether the state will be saved as part of the modules ``state_dict``.
Default is ``False``.
Note:
Setting ``dist_reduce_fx`` to None will return the metric state synchronized across different processes.
However, there won't be any reduction function applied to the synchronized metric state.
The metric states would be synced as follows
- If the metric state is :class:`~torch.Tensor`, the synced value will be a stacked :class:`~torch.Tensor`
across the process dimension if the metric state was a :class:`~torch.Tensor`. The original
:class:`~torch.Tensor` metric state retains dimension and hence the synchronized output will be of shape
``(num_process, ...)``.
- If the metric state is a ``list``, the synced value will be a ``list`` containing the
combined elements from all processes.
Note:
When passing a custom function to ``dist_reduce_fx``, expect the synchronized metric state to follow
the format discussed in the above note.
Raises:
ValueError:
If ``default`` is not a ``tensor`` or an ``empty list``.
ValueError:
If ``dist_reduce_fx`` is not callable or one of ``"mean"``, ``"sum"``, ``"cat"``, ``"min"``,
``"max"`` or ``None``.
"""
if not isinstance(default, (Tensor, list)) or (isinstance(default, list) and default):
raise ValueError("state variable must be a tensor or any empty list (where you can append tensors)")
if dist_reduce_fx == "sum":
dist_reduce_fx = dim_zero_sum
elif dist_reduce_fx == "mean":
dist_reduce_fx = dim_zero_mean
elif dist_reduce_fx == "max":
dist_reduce_fx = dim_zero_max
elif dist_reduce_fx == "min":
dist_reduce_fx = dim_zero_min
elif dist_reduce_fx == "cat":
dist_reduce_fx = dim_zero_cat
elif dist_reduce_fx is not None and not callable(dist_reduce_fx):
raise ValueError("`dist_reduce_fx` must be callable or one of ['mean', 'sum', 'cat', 'min', 'max', None]")
if isinstance(default, Tensor):
default = default.contiguous()
setattr(self, name, default)
self._defaults[name] = deepcopy(default)
self._persistent[name] = persistent
self._reductions[name] = dist_reduce_fx
@torch.jit.unused
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Aggregate and evaluate batch input directly.
Serves the dual purpose of both computing the metric on the current batch of inputs and adding the batch
statistics to the overall accumulating metric state. Input arguments are the exact same as the corresponding
``update`` method. The returned output is the exact same as the output of ``compute``.
Args:
args: Any arguments as required by the metric ``update`` method.
kwargs: Any keyword arguments as required by the metric ``update`` method.
Returns:
The output of the ``compute`` method evaluated on the current batch.
Raises:
TorchMetricsUserError:
If the metric is already synced and ``forward`` is called again.
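Example (a short sketch of the difference between the batch value from ``forward`` and the accumulated value
from ``compute``):
>>> import torch
>>> from torchmetrics.classification import BinaryAccuracy
>>> metric = BinaryAccuracy()
>>> batch_value = metric(torch.tensor([1, 0, 1]), torch.tensor([1, 1, 1]))  # value for this batch only
>>> accumulated = metric.compute()  # value over all batches seen since the last reset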
"""
# check if states are already synced
if self._is_synced:
raise TorchMetricsUserError(
"The Metric shouldn't be synced when performing ``forward``. "
"HINT: Did you forget to call ``unsync`` ?."
)
if self.full_state_update or self.full_state_update is None or self.dist_sync_on_step:
self._forward_cache = self._forward_full_state_update(*args, **kwargs)
else:
self._forward_cache = self._forward_reduce_state_update(*args, **kwargs)
return self._forward_cache
def _forward_full_state_update(self, *args: Any, **kwargs: Any) -> Any:
"""Forward computation using two calls to `update`.
Doing this ensures that metrics that need access to the full metric state during `update` work as expected.
This is the safest method to use for any metric but also the slower of the two forward
implementations.
"""
# global accumulation
self.update(*args, **kwargs)
_update_count = self._update_count
self._to_sync = self.dist_sync_on_step
# skip restore cache operation from compute as cache is stored below.
self._should_unsync = False
# skip computing on cpu for the batch
_temp_compute_on_cpu = self.compute_on_cpu
self.compute_on_cpu = False
# save context before switch
cache = {attr: getattr(self, attr) for attr in self._defaults}
# call reset, update, compute, on single batch
self._enable_grad = True # allow grads for batch computation
self.reset()
self.update(*args, **kwargs)
batch_val = self.compute()
# restore context
for attr, val in cache.items():
setattr(self, attr, val)
self._update_count = _update_count
# restore context
self._is_synced = False
self._should_unsync = True
self._to_sync = self.sync_on_compute
self._computed = None
self._enable_grad = False
self.compute_on_cpu = _temp_compute_on_cpu
if self.compute_on_cpu:
self._move_list_states_to_cpu()
return batch_val
def _forward_reduce_state_update(self, *args: Any, **kwargs: Any) -> Any:
"""Forward computation using single call to `update`.
This can be done when the global metric state is a simple reduction of batch states. This can be unsafe for
certain metric cases but is also the fastest way to both accumulate globally and compute locally.
"""
# store global state and reset to default
global_state = {attr: getattr(self, attr) for attr in self._defaults}
_update_count = self._update_count
self.reset()
# local synchronization settings
self._to_sync = self.dist_sync_on_step
self._should_unsync = False
_temp_compute_on_cpu = self.compute_on_cpu
self.compute_on_cpu = False
self._enable_grad = True # allow grads for batch computation
# calculate batch state and compute batch value
self.update(*args, **kwargs)
batch_val = self.compute()
# reduce batch and global state
self._update_count = _update_count + 1
with torch.no_grad():
self._reduce_states(global_state)
# restore context
self._is_synced = False
self._should_unsync = True
self._to_sync = self.sync_on_compute
self._computed = None
self._enable_grad = False
self.compute_on_cpu = _temp_compute_on_cpu
if self.compute_on_cpu:
self._move_list_states_to_cpu()
return batch_val
def _reduce_states(self, incoming_state: Dict[str, Any]) -> None:
"""Add an incoming metric state to the current state of the metric.
Args:
incoming_state: a dict containing a metric state similar to the state of the metric itself
"""
for attr in self._defaults:
local_state = getattr(self, attr)
global_state = incoming_state[attr]
reduce_fn = self._reductions[attr]
if reduce_fn == dim_zero_sum:
reduced = global_state + local_state
elif reduce_fn == dim_zero_mean:
reduced = ((self._update_count - 1) * global_state + local_state).float() / self._update_count
elif reduce_fn == dim_zero_max:
reduced = torch.max(global_state, local_state)
elif reduce_fn == dim_zero_min:
reduced = torch.min(global_state, local_state)
elif reduce_fn == dim_zero_cat:
reduced = global_state + local_state
elif reduce_fn is None and isinstance(global_state, Tensor):
reduced = torch.stack([global_state, local_state])
elif reduce_fn is None and isinstance(global_state, list):
reduced = _flatten([global_state, local_state])
elif reduce_fn and callable(reduce_fn):
reduced = reduce_fn(torch.stack([global_state, local_state]))
else:
raise TypeError(f"Unsupported reduce_fn: {reduce_fn}")
setattr(self, attr, reduced)
def _sync_dist(self, dist_sync_fn: Callable = gather_all_tensors, process_group: Optional[Any] = None) -> None:
input_dict = {attr: getattr(self, attr) for attr in self._reductions}
for attr, reduction_fn in self._reductions.items():
# pre-concatenate metric states that are lists to reduce number of all_gather operations
if reduction_fn == dim_zero_cat and isinstance(input_dict[attr], list) and len(input_dict[attr]) > 1:
input_dict[attr] = [dim_zero_cat(input_dict[attr])]
output_dict = apply_to_collection(
input_dict,
Tensor,
dist_sync_fn,
group=process_group or self.process_group,
)
for attr, reduction_fn in self._reductions.items():
# pre-processing ops (stack or flatten for inputs)
if isinstance(output_dict[attr], list) and len(output_dict[attr]) == 0:
setattr(self, attr, [])
continue
if isinstance(output_dict[attr][0], Tensor):
output_dict[attr] = torch.stack(output_dict[attr])
elif isinstance(output_dict[attr][0], list):
output_dict[attr] = _flatten(output_dict[attr])
if not (callable(reduction_fn) or reduction_fn is None):
raise TypeError("reduction_fn must be callable or None")
reduced = reduction_fn(output_dict[attr]) if reduction_fn is not None else output_dict[attr]
setattr(self, attr, reduced)
def _wrap_update(self, update: Callable) -> Callable:
@functools.wraps(update)
def wrapped_func(*args: Any, **kwargs: Any) -> None:
self._computed = None
self._update_count += 1
with torch.set_grad_enabled(self._enable_grad):
try:
update(*args, **kwargs)
except RuntimeError as err:
if "Expected all tensors to be on" in str(err):
raise RuntimeError(
"Encountered different devices in metric calculation (see stacktrace for details)."
" This could be due to the metric class not being on the same device as input."
f" Instead of `metric={self.__class__.__name__}(...)` try to do"
f" `metric={self.__class__.__name__}(...).to(device)` where"
" device corresponds to the device of the input."
) from err
raise err
if self.compute_on_cpu:
self._move_list_states_to_cpu()
return wrapped_func
def _move_list_states_to_cpu(self) -> None:
"""Move list states to cpu to save GPU memory."""
for key in self._defaults:
current_val = getattr(self, key)
if isinstance(current_val, Sequence):
setattr(self, key, [cur_v.to("cpu") for cur_v in current_val])
def sync(
self,
dist_sync_fn: Optional[Callable] = None,
process_group: Optional[Any] = None,
should_sync: bool = True,
distributed_available: Optional[Callable] = None,
) -> None:
"""Sync function for manually controlling when metrics states should be synced across processes.
Args:
dist_sync_fn: Function to be used to perform states synchronization
process_group:
Specify the process group on which synchronization is called.
default: `None` (which selects the entire world)
should_sync: Whether to perform state synchronization. This will have an impact
only when running in a distributed setting.
distributed_available: Function to determine if we are running inside a distributed setting
Raises:
TorchMetricsUserError:
If the metric is already synced and ``sync`` is called again.
"""
if self._is_synced and should_sync:
raise TorchMetricsUserError("The Metric has already been synced.")
if distributed_available is None and self.distributed_available_fn is not None:
distributed_available = self.distributed_available_fn
is_distributed = distributed_available() if callable(distributed_available) else None
if not should_sync or not is_distributed:
return
if dist_sync_fn is None:
dist_sync_fn = gather_all_tensors
# cache prior to syncing
self._cache = {attr: getattr(self, attr) for attr in self._defaults}
# sync
self._sync_dist(dist_sync_fn, process_group=process_group)
self._is_synced = True
def unsync(self, should_unsync: bool = True) -> None:
"""Unsync function for manually controlling when metrics states should be reverted back to their local states.
Args:
should_unsync: Whether to perform unsync
"""
if not should_unsync:
return
if not self._is_synced:
raise TorchMetricsUserError("The Metric has already been un-synced.")
if self._cache is None:
raise TorchMetricsUserError("The internal cache should exist to unsync the Metric.")
# if we synced, restore to cache so that we can continue to accumulate un-synced state
for attr, val in self._cache.items():
setattr(self, attr, val)
self._is_synced = False
self._cache = None
@contextmanager
def sync_context(
self,
dist_sync_fn: Optional[Callable] = None,
process_group: Optional[Any] = None,
should_sync: bool = True,
should_unsync: bool = True,
distributed_available: Optional[Callable] = None,
) -> Generator:
"""Context manager to synchronize states.
This context manager is used in a distributed setting and makes sure that the local cache states are restored
after yielding the synchronized state.
Args:
dist_sync_fn: Function to be used to perform states synchronization
process_group:
Specify the process group on which synchronization is called.
default: `None` (which selects the entire world)
should_sync: Whether to perform state synchronization. This will have an impact
only when running in a distributed setting.
should_unsync: Whether to restore the cache state so that the metrics can
continue to be accumulated.
distributed_available: Function to determine if we are running inside a distributed setting
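Example (a sketch; in a non-distributed run the synchronization is simply a no-op):
>>> import torch
>>> from torchmetrics.classification import BinaryAccuracy
>>> metric = BinaryAccuracy()
>>> metric.update(torch.tensor([1, 0]), torch.tensor([1, 1]))
>>> with metric.sync_context():
...     synced_state = metric.metric_state  # states gathered across processes (when running distributed)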
"""
self.sync(
dist_sync_fn=dist_sync_fn,
process_group=process_group,
should_sync=should_sync,
distributed_available=distributed_available,
)
yield
self.unsync(should_unsync=self._is_synced and should_unsync)
def _wrap_compute(self, compute: Callable) -> Callable:
@functools.wraps(compute)
def wrapped_func(*args: Any, **kwargs: Any) -> Any:
if self._update_count == 0:
rank_zero_warn(
f"The ``compute`` method of metric {self.__class__.__name__}"
" was called before the ``update`` method which may lead to errors,"
" as metric states have not yet been updated.",
UserWarning,
)
# return cached value
if self._computed is not None:
return self._computed
# compute relies on the sync context manager to gather the states across processes and apply reduction
# if synchronization happened, the current rank accumulated states will be restored to keep
# accumulation going if ``should_unsync=True``,
with self.sync_context(
dist_sync_fn=self.dist_sync_fn,
should_sync=self._to_sync,
should_unsync=self._should_unsync,
):
value = _squeeze_if_scalar(compute(*args, **kwargs))
if self.compute_with_cache:
self._computed = value
return value
return wrapped_func
@abstractmethod
def update(self, *_: Any, **__: Any) -> None:
"""Override this method to update the state variables of your metric class."""
@abstractmethod
def compute(self) -> Any:
"""Override this method to compute the final metric value.
This method will automatically synchronize state variables when running in distributed backend.
"""
def plot(self, *_: Any, **__: Any) -> Any:
"""Override this method plot the metric value."""
raise NotImplementedError
def _plot(
self,
val: Optional[Union[Tensor, Sequence[Tensor], Dict[str, Tensor], Sequence[Dict[str, Tensor]]]] = None,
ax: Optional[_AX_TYPE] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
"""
val = val if val is not None else self.compute()
fig, ax = plot_single_or_multi_val(
val,
ax=ax,
higher_is_better=self.higher_is_better,
name=self.__class__.__name__,
lower_bound=self.plot_lower_bound,
upper_bound=self.plot_upper_bound,
legend_name=self.plot_legend_name,
)
return fig, ax
def reset(self) -> None:
"""Reset metric state variables to their default value."""
self._update_count = 0
self._forward_cache = None
self._computed = None
for attr, default in self._defaults.items():
current_val = getattr(self, attr)
if isinstance(default, Tensor):
setattr(self, attr, default.detach().clone().to(current_val.device))
else:
setattr(self, attr, [])
# reset internal states
self._cache = None
self._is_synced = False
def clone(self) -> "Metric":
"""Make a copy of the metric."""
return deepcopy(self)
def __getstate__(self) -> Dict[str, Any]:
"""Get the current state, including all metric states, for the metric.
Used for loading and saving a metric.
"""
# ignore update and compute functions for pickling
return {k: v for k, v in self.__dict__.items() if k not in ["update", "compute", "_update_signature"]}
def __setstate__(self, state: Dict[str, Any]) -> None:
"""Set the state of the metric, based on a input state.
Used for loading and saving a metric.
"""
# manually restore update and compute functions for pickling
self.__dict__.update(state)
self._update_signature = inspect.signature(self.update)
self.update: Callable = self._wrap_update(self.update) # type: ignore[method-assign]
self.compute: Callable = self._wrap_compute(self.compute) # type: ignore[method-assign]
def __setattr__(self, name: str, value: Any) -> None:
"""Overwrite default method to prevent specific attributes from being set by user."""
if name in (
"higher_is_better",
"is_differentiable",
"full_state_update",
"plot_lower_bound",
"plot_upper_bound",
"plot_legend_name",
):
raise RuntimeError(f"Can't change const `{name}`.")
super().__setattr__(name, value)
@property
def device(self) -> "torch.device":
"""Return the device of the metric."""
return self._device
def type(self, dst_type: Union[str, torch.dtype]) -> "Metric": # noqa: A003
"""Override default and prevent dtype casting.
Please use :meth:`Metric.set_dtype` instead.
"""
return self
def float(self) -> "Metric": # noqa: A003
"""Override default and prevent dtype casting.
Please use :meth:`Metric.set_dtype` instead.
"""
return self
def double(self) -> "Metric":
"""Override default and prevent dtype casting.
Please use :meth:`Metric.set_dtype` instead.
"""
return self
def half(self) -> "Metric":
"""Override default and prevent dtype casting.
Please use :meth:`Metric.set_dtype` instead.
"""
return self
def set_dtype(self, dst_type: Union[str, torch.dtype]) -> "Metric":
"""Transfer all metric state to specific dtype. Special version of standard `type` method.
Arguments:
dst_type: the desired type as string or dtype object
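Example (a minimal sketch):
>>> import torch
>>> from torchmetrics.classification import BinaryAccuracy
>>> metric = BinaryAccuracy().set_dtype(torch.double)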
"""
self._dtype_convert = True
out = super().type(dst_type)
out._dtype_convert = False
return out
def _apply(self, fn: Callable, exclude_state: Sequence[str] = "") -> Module:
"""Overwrite `_apply` function such that we can also move metric states to the correct device.
This method is called by the base ``nn.Module`` class whenever `.to`, `.cuda`, `.float`, `.half` etc. methods
are called. Dtype conversion is guarded and will only happen through the special `set_dtype` method.
Args:
fn: the function to apply
exclude_state: list of state variables to exclude from applying the function; these then need to be handled
by the metric class itself.
"""
this = super()._apply(fn)
fs = str(fn)
cond = any(f in fs for f in ["Module.type", "Module.half", "Module.float", "Module.double", "Module.bfloat16"])
if not self._dtype_convert and cond:
return this
# Also apply fn to metric states and defaults
for key, value in this._defaults.items():
if key in exclude_state:
continue
if isinstance(value, Tensor):
this._defaults[key] = fn(value)
elif isinstance(value, Sequence):
this._defaults[key] = [fn(v) for v in value]
current_val = getattr(this, key)
if isinstance(current_val, Tensor):
setattr(this, key, fn(current_val))
elif isinstance(current_val, Sequence):
setattr(this, key, [fn(cur_v) for cur_v in current_val])
else:
raise TypeError(
f"Expected metric state to be either a Tensor or a list of Tensor, but encountered {current_val}"
)
# make sure to update the device attribute
# if the dummy tensor moves device by fn function we should also update the attribute
self._device = fn(torch.zeros(1, device=self.device)).device
# Additional apply to forward cache and computed attributes (may be nested)
if this._computed is not None:
this._computed = apply_to_collection(this._computed, Tensor, fn)
if this._forward_cache is not None:
this._forward_cache = apply_to_collection(this._forward_cache, Tensor, fn)
return this
def persistent(self, mode: bool = False) -> None:
"""Change post-init if metric states should be saved to its state_dict."""
for key in self._persistent:
self._persistent[key] = mode
def state_dict( # type: ignore[override] # todo
self,
destination: Optional[Dict[str, Any]] = None,
prefix: str = "",
keep_vars: bool = False,
) -> Dict[str, Any]:
"""Get the current state of metric as an dictionary.
Args:
destination: Optional dictionary, that if provided, the state of module will be updated into the dict and
the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned.
prefix: optional string, a prefix added to parameter and buffer names to compose the keys in state_dict.
keep_vars: by default the :class:`~torch.Tensor` returned in the state dict are detached from autograd.
If set to ``True``, detaching will not be performed.
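Example (a sketch showing that metric states only end up in the ``state_dict`` when marked persistent):
>>> from torchmetrics.classification import BinaryAccuracy
>>> metric = BinaryAccuracy()
>>> metric.persistent(True)  # include the metric states in the state_dict
>>> sd = metric.state_dict()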
"""
destination: Dict[str, Union[torch.Tensor, List, Any]] = super().state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars # type: ignore[arg-type]
)
# Register metric states to be part of the state_dict
for key in self._defaults:
if not self._persistent[key]:
continue
current_val = getattr(self, key)
if not keep_vars:
if isinstance(current_val, Tensor):
current_val = current_val.detach()
elif isinstance(current_val, list):
current_val = [cur_v.detach() if isinstance(cur_v, Tensor) else cur_v for cur_v in current_val]
destination[prefix + key] = deepcopy(current_val)
return destination
def _load_from_state_dict(
self,
state_dict: dict,
prefix: str,
local_metadata: dict,
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
) -> None:
"""Load metric states from state_dict."""
for key in self._defaults:
name = prefix + key
if name in state_dict:
setattr(self, key, state_dict.pop(name))
super()._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
def _filter_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
"""Filter kwargs such that they match the update signature of the metric."""
# filter all parameters based on update signature except those of
# types `VAR_POSITIONAL` for `* args` and `VAR_KEYWORD` for `** kwargs`
_params = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
_sign_params = self._update_signature.parameters
filtered_kwargs = {
k: v for k, v in kwargs.items() if (k in _sign_params and _sign_params[k].kind not in _params)
}
exists_var_keyword = any(v.kind == inspect.Parameter.VAR_KEYWORD for v in _sign_params.values())
# if no kwargs filtered, return all kwargs as default
if not filtered_kwargs and not exists_var_keyword:
# no kwargs in update signature -> don't return any kwargs
return {}
if exists_var_keyword:
# kwargs found in update signature -> return all kwargs to be sure to not omit any.
# filtering logic is likely implemented within the update call.
return kwargs
return filtered_kwargs
def __hash__(self) -> int:
"""Return an unique hash of the metric.
The hash depends on both the class itself but also the current metric state, which therefore enforces that two
instances of the same metrics never have the same hash even if they have been updated on the same data.
"""
# we need to add the id here, since PyTorch requires a module hash to be unique.
# Internally, PyTorch nn.Module relies on that for children discovery
# (see https://github.com/pytorch/pytorch/blob/v1.9.0/torch/nn/modules/module.py#L1544)
# For metrics that include tensors it is not a problem,
# since their hash is unique based on the memory location but we cannot rely on that for every metric.
hash_vals = [self.__class__.__name__, id(self)]
for key in self._defaults:
val = getattr(self, key)
# Special case: allow list values, so long
# as their elements are hashable
if hasattr(val, "__iter__") and not isinstance(val, Tensor):
hash_vals.extend(val)
else:
hash_vals.append(val)
return hash(tuple(hash_vals))
def __add__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the addition operator."""
return CompositionalMetric(torch.add, self, other)
def __and__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the logical and operator."""
return CompositionalMetric(torch.bitwise_and, self, other)
def __eq__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[override]
"""Construct compositional metric using the equal operator."""
return CompositionalMetric(torch.eq, self, other)
def __floordiv__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the floor division operator."""
return CompositionalMetric(torch.floor_divide, self, other)
def __ge__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[misc]
"""Construct compositional metric using the greater than or equal operator."""
return CompositionalMetric(torch.ge, self, other)
def __gt__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[misc]
"""Construct compositional metric using the greater than operator."""
return CompositionalMetric(torch.gt, self, other)
def __le__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[misc]
"""Construct compositional metric using the less than or equal operator."""
return CompositionalMetric(torch.le, self, other)
def __lt__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[misc]
"""Construct compositional metric using the less than operator."""
return CompositionalMetric(torch.lt, self, other)
def __matmul__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the matrix multiplication operator."""
return CompositionalMetric(torch.matmul, self, other)
def __mod__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the remainder operator."""
return CompositionalMetric(torch.fmod, self, other)
def __mul__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the multiplication operator."""
return CompositionalMetric(torch.mul, self, other)
def __ne__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[override]
"""Construct compositional metric using the not equal operator."""
return CompositionalMetric(torch.ne, self, other)
def __or__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the logical or operator."""
return CompositionalMetric(torch.bitwise_or, self, other)
def __pow__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the exponential/power operator."""
return CompositionalMetric(torch.pow, self, other)
def __radd__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[misc]
"""Construct compositional metric using the addition operator."""
return CompositionalMetric(torch.add, other, self)
def __rand__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the logical and operator."""
# no need to swap the operands here since bitwise_and is commutative
return CompositionalMetric(torch.bitwise_and, self, other)
def __rfloordiv__(self, other: "CompositionalMetric") -> "Metric":
"""Construct compositional metric using the floor division operator."""
return CompositionalMetric(torch.floor_divide, other, self)
def __rmatmul__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the matrix multiplication operator."""
return CompositionalMetric(torch.matmul, other, self)
def __rmod__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[misc]
"""Construct compositional metric using the remainder operator."""
return CompositionalMetric(torch.fmod, other, self)
def __rmul__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[misc]
"""Construct compositional metric using the multiplication operator."""
return CompositionalMetric(torch.mul, other, self)
def __ror__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the logical or operator."""
return CompositionalMetric(torch.bitwise_or, other, self)
def __rpow__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the exponential/power operator."""
return CompositionalMetric(torch.pow, other, self)
def __rsub__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[misc]
"""Construct compositional metric using the subtraction operator."""
return CompositionalMetric(torch.sub, other, self)
def __rtruediv__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric": # type: ignore[misc]
"""Construct compositional metric using the true divide operator."""
return CompositionalMetric(torch.true_divide, other, self)
def __rxor__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the logical xor operator."""
return CompositionalMetric(torch.bitwise_xor, other, self)
def __sub__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the subtraction operator."""
return CompositionalMetric(torch.sub, self, other)
def __truediv__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the true divide operator."""
return CompositionalMetric(torch.true_divide, self, other)
def __xor__(self, other: Union["Metric", builtins.float, Tensor]) -> "CompositionalMetric":
"""Construct compositional metric using the logical xor operator."""
return CompositionalMetric(torch.bitwise_xor, self, other)
def __abs__(self) -> "CompositionalMetric":
"""Construct compositional metric using the absolute operator."""
return CompositionalMetric(torch.abs, self, None)
def __inv__(self) -> "CompositionalMetric":
"""Construct compositional metric using the not operator."""
return CompositionalMetric(torch.bitwise_not, self, None)
def __invert__(self) -> "CompositionalMetric":
"""Construct compositional metric using the not operator."""
return self.__inv__()
def __neg__(self) -> "CompositionalMetric":
"""Construct compositional metric using absolute negative operator."""
return CompositionalMetric(_neg, self, None)
def __pos__(self) -> "CompositionalMetric":
"""Construct compositional metric using absolute operator."""
return CompositionalMetric(torch.abs, self, None)
def __getitem__(self, idx: int) -> "CompositionalMetric":
"""Construct compositional metric using the get item operator."""
return CompositionalMetric(lambda x: x[idx], self, None)
def __getnewargs__(self) -> Tuple:
"""Needed method for construction of new metrics __new__ method."""
return tuple(
Metric.__str__(self),
)
__iter__ = None
def _neg(x: Tensor) -> Tensor:
return -torch.abs(x)
class CompositionalMetric(Metric):
"""Composition of two metrics with a specific operator which will be executed upon metrics compute."""
def __init__(
self,
operator: Callable,
metric_a: Union[Metric, float, Tensor],
metric_b: Union[Metric, float, Tensor, None],
) -> None:
"""Class for creating compositions of metrics.
This metric class is the output of adding, multiplying etc. any other metric. The metric re-implements the
standard ``update``, ``forward``, ``reset`` and ``compute`` methods to redirect the arguments to the metrics
that formed this composition.
Args:
operator:
The operator taking in one (if metric_b is None) or two arguments. Will be applied to outputs of
metric_a.compute() and (optionally if metric_b is not None) metric_b.compute()
metric_a:
First metric whose compute() result is the first argument of operator
metric_b: second metric whose compute() result is the second argument of operator.
For operators taking in only one input, this should be None.
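Example (a sketch; compositions are normally created through the arithmetic operators defined on ``Metric``
rather than by instantiating this class directly):
>>> from torchmetrics.classification import BinaryPrecision, BinaryRecall
>>> precision, recall = BinaryPrecision(), BinaryRecall()
>>> f1_like = 2 * (precision * recall) / (precision + recall)
>>> type(f1_like).__name__
'CompositionalMetric'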
"""
super().__init__()
self.op = operator
if isinstance(metric_a, Tensor):
self.register_buffer("metric_a", metric_a, persistent=False)
else:
self.metric_a = metric_a
if isinstance(metric_b, Tensor):
self.register_buffer("metric_b", metric_b, persistent=False)
else:
self.metric_b = metric_b
def _sync_dist(self, dist_sync_fn: Optional[Callable] = None, process_group: Optional[Any] = None) -> None:
"""No syncing required here.
Syncing will be done in ``metric_a`` and ``metric_b``.
"""
def update(self, *args: Any, **kwargs: Any) -> None:
"""Redirect the call to the input which the conposition was formed from."""
if isinstance(self.metric_a, Metric):
self.metric_a.update(*args, **self.metric_a._filter_kwargs(**kwargs))
if isinstance(self.metric_b, Metric):
self.metric_b.update(*args, **self.metric_b._filter_kwargs(**kwargs))
def compute(self) -> Any:
"""Redirect the call to the input which the conposition was formed from."""
# also some parsing for kwargs?
val_a = self.metric_a.compute() if isinstance(self.metric_a, Metric) else self.metric_a
val_b = self.metric_b.compute() if isinstance(self.metric_b, Metric) else self.metric_b
if val_b is None:
return self.op(val_a)
return self.op(val_a, val_b)
@torch.jit.unused
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Calculate metric on current batch and accumulate to global state."""
val_a = (
self.metric_a(*args, **self.metric_a._filter_kwargs(**kwargs))
if isinstance(self.metric_a, Metric)
else self.metric_a
)
val_b = (
self.metric_b(*args, **self.metric_b._filter_kwargs(**kwargs))
if isinstance(self.metric_b, Metric)
else self.metric_b
)
if val_a is None:
self._forward_cache = None
return self._forward_cache
if val_b is None:
if isinstance(self.metric_b, Metric):
self._forward_cache = None
return self._forward_cache
# Unary op
self._forward_cache = self.op(val_a)
return self._forward_cache
# Binary op
self._forward_cache = self.op(val_a, val_b)
return self._forward_cache
def reset(self) -> None:
"""Redirect the call to the input which the conposition was formed from."""
if isinstance(self.metric_a, Metric):
self.metric_a.reset()
if isinstance(self.metric_b, Metric):
self.metric_b.reset()
def persistent(self, mode: bool = False) -> None:
"""Change if metric state is persistent (save as part of state_dict) or not.
Args:
mode: bool indicating if all states should be persistent or not
"""
if isinstance(self.metric_a, Metric):
self.metric_a.persistent(mode=mode)
if isinstance(self.metric_b, Metric):
self.metric_b.persistent(mode=mode)
def __repr__(self) -> str:
"""Return a representation of the compositional metric, including the two inputs it was formed from."""
_op_metrics = f"(\n {self.op.__name__}(\n {self.metric_a!r},\n {self.metric_b!r}\n )\n)"
return self.__class__.__name__ + _op_metrics
def _wrap_compute(self, compute: Callable) -> Callable:
"""No wrapping necessary for compositional metrics."""
return compute
public_repos/torchmetrics/src | public_repos/torchmetrics/src/torchmetrics/__init__.py | """Root package info."""
import logging as __logging
import os
from lightning_utilities.core.imports import package_available
from torchmetrics.__about__ import * # noqa: F403
_logger = __logging.getLogger("torchmetrics")
_logger.addHandler(__logging.StreamHandler())
_logger.setLevel(__logging.INFO)
_PACKAGE_ROOT = os.path.dirname(__file__)
_PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT)
if package_available("PIL"):
import PIL
if not hasattr(PIL, "PILLOW_VERSION"):
PIL.PILLOW_VERSION = PIL.__version__
from torchmetrics import functional # noqa: E402
from torchmetrics.aggregation import ( # noqa: E402
CatMetric,
MaxMetric,
MeanMetric,
MinMetric,
RunningMean,
RunningSum,
SumMetric,
)
from torchmetrics.audio._deprecated import _PermutationInvariantTraining as PermutationInvariantTraining # noqa: E402
from torchmetrics.audio._deprecated import ( # noqa: E402
_ScaleInvariantSignalDistortionRatio as ScaleInvariantSignalDistortionRatio,
)
from torchmetrics.audio._deprecated import ( # noqa: E402
_ScaleInvariantSignalNoiseRatio as ScaleInvariantSignalNoiseRatio,
)
from torchmetrics.audio._deprecated import _SignalDistortionRatio as SignalDistortionRatio # noqa: E402
from torchmetrics.audio._deprecated import _SignalNoiseRatio as SignalNoiseRatio # noqa: E402
from torchmetrics.classification import ( # noqa: E402
AUROC,
ROC,
Accuracy,
AveragePrecision,
CalibrationError,
CohenKappa,
ConfusionMatrix,
Dice,
ExactMatch,
F1Score,
FBetaScore,
HammingDistance,
HingeLoss,
JaccardIndex,
MatthewsCorrCoef,
Precision,
PrecisionAtFixedRecall,
PrecisionRecallCurve,
Recall,
RecallAtFixedPrecision,
Specificity,
SpecificityAtSensitivity,
StatScores,
)
from torchmetrics.collections import MetricCollection # noqa: E402
from torchmetrics.detection._deprecated import _ModifiedPanopticQuality as ModifiedPanopticQuality # noqa: E402
from torchmetrics.detection._deprecated import _PanopticQuality as PanopticQuality # noqa: E402
from torchmetrics.image._deprecated import ( # noqa: E402
_ErrorRelativeGlobalDimensionlessSynthesis as ErrorRelativeGlobalDimensionlessSynthesis,
)
from torchmetrics.image._deprecated import ( # noqa: E402
_MultiScaleStructuralSimilarityIndexMeasure as MultiScaleStructuralSimilarityIndexMeasure,
)
from torchmetrics.image._deprecated import _PeakSignalNoiseRatio as PeakSignalNoiseRatio # noqa: E402
from torchmetrics.image._deprecated import _RelativeAverageSpectralError as RelativeAverageSpectralError # noqa: E402
from torchmetrics.image._deprecated import ( # noqa: E402
_RootMeanSquaredErrorUsingSlidingWindow as RootMeanSquaredErrorUsingSlidingWindow,
)
from torchmetrics.image._deprecated import _SpectralAngleMapper as SpectralAngleMapper # noqa: E402
from torchmetrics.image._deprecated import _SpectralDistortionIndex as SpectralDistortionIndex # noqa: E402
from torchmetrics.image._deprecated import ( # noqa: E402
_StructuralSimilarityIndexMeasure as StructuralSimilarityIndexMeasure,
)
from torchmetrics.image._deprecated import _TotalVariation as TotalVariation # noqa: E402
from torchmetrics.image._deprecated import _UniversalImageQualityIndex as UniversalImageQualityIndex # noqa: E402
from torchmetrics.metric import Metric # noqa: E402
from torchmetrics.nominal import ( # noqa: E402
CramersV,
FleissKappa,
PearsonsContingencyCoefficient,
TheilsU,
TschuprowsT,
)
from torchmetrics.regression import ( # noqa: E402
ConcordanceCorrCoef,
CosineSimilarity,
ExplainedVariance,
KendallRankCorrCoef,
KLDivergence,
LogCoshError,
MeanAbsoluteError,
MeanAbsolutePercentageError,
MeanSquaredError,
MeanSquaredLogError,
MinkowskiDistance,
PearsonCorrCoef,
R2Score,
RelativeSquaredError,
SpearmanCorrCoef,
SymmetricMeanAbsolutePercentageError,
TweedieDevianceScore,
WeightedMeanAbsolutePercentageError,
)
from torchmetrics.retrieval._deprecated import _RetrievalFallOut as RetrievalFallOut # noqa: E402
from torchmetrics.retrieval._deprecated import _RetrievalHitRate as RetrievalHitRate # noqa: E402
from torchmetrics.retrieval._deprecated import _RetrievalMAP as RetrievalMAP # noqa: E402
from torchmetrics.retrieval._deprecated import _RetrievalMRR as RetrievalMRR # noqa: E402
from torchmetrics.retrieval._deprecated import _RetrievalNormalizedDCG as RetrievalNormalizedDCG # noqa: E402
from torchmetrics.retrieval._deprecated import _RetrievalPrecision as RetrievalPrecision # noqa: E402
from torchmetrics.retrieval._deprecated import ( # noqa: E402
_RetrievalPrecisionRecallCurve as RetrievalPrecisionRecallCurve,
)
from torchmetrics.retrieval._deprecated import _RetrievalRecall as RetrievalRecall # noqa: E402
from torchmetrics.retrieval._deprecated import ( # noqa: E402
_RetrievalRecallAtFixedPrecision as RetrievalRecallAtFixedPrecision,
)
from torchmetrics.retrieval._deprecated import _RetrievalRPrecision as RetrievalRPrecision # noqa: E402
from torchmetrics.text._deprecated import _BLEUScore as BLEUScore # noqa: E402
from torchmetrics.text._deprecated import _CharErrorRate as CharErrorRate # noqa: E402
from torchmetrics.text._deprecated import _CHRFScore as CHRFScore # noqa: E402
from torchmetrics.text._deprecated import _ExtendedEditDistance as ExtendedEditDistance # noqa: E402
from torchmetrics.text._deprecated import _MatchErrorRate as MatchErrorRate # noqa: E402
from torchmetrics.text._deprecated import _Perplexity as Perplexity # noqa: E402
from torchmetrics.text._deprecated import _SacreBLEUScore as SacreBLEUScore # noqa: E402
from torchmetrics.text._deprecated import _SQuAD as SQuAD # noqa: E402
from torchmetrics.text._deprecated import _TranslationEditRate as TranslationEditRate # noqa: E402
from torchmetrics.text._deprecated import _WordErrorRate as WordErrorRate # noqa: E402
from torchmetrics.text._deprecated import _WordInfoLost as WordInfoLost # noqa: E402
from torchmetrics.text._deprecated import _WordInfoPreserved as WordInfoPreserved # noqa: E402
from torchmetrics.wrappers import ( # noqa: E402
BootStrapper,
ClasswiseWrapper,
MetricTracker,
MinMaxMetric,
MultioutputWrapper,
MultitaskWrapper,
)
__all__ = [
"functional",
"Accuracy",
"AUROC",
"AveragePrecision",
"BLEUScore",
"BootStrapper",
"CalibrationError",
"CatMetric",
"ClasswiseWrapper",
"CharErrorRate",
"CHRFScore",
"ConcordanceCorrCoef",
"CohenKappa",
"ConfusionMatrix",
"CosineSimilarity",
"CramersV",
"Dice",
"TweedieDevianceScore",
"ErrorRelativeGlobalDimensionlessSynthesis",
"ExactMatch",
"ExplainedVariance",
"ExtendedEditDistance",
"F1Score",
"FBetaScore",
"FleissKappa",
"HammingDistance",
"HingeLoss",
"JaccardIndex",
"KendallRankCorrCoef",
"KLDivergence",
"LogCoshError",
"MatchErrorRate",
"MatthewsCorrCoef",
"MaxMetric",
"MeanAbsoluteError",
"MeanAbsolutePercentageError",
"MeanMetric",
"MeanSquaredError",
"MeanSquaredLogError",
"Metric",
"MetricCollection",
"MetricTracker",
"MinkowskiDistance",
"MinMaxMetric",
"MinMetric",
"ModifiedPanopticQuality",
"MultioutputWrapper",
"MultitaskWrapper",
"MultiScaleStructuralSimilarityIndexMeasure",
"PanopticQuality",
"PearsonCorrCoef",
"PearsonsContingencyCoefficient",
"PermutationInvariantTraining",
"Perplexity",
"Precision",
"PrecisionAtFixedRecall",
"PrecisionRecallCurve",
"PeakSignalNoiseRatio",
"R2Score",
"Recall",
"RecallAtFixedPrecision",
"RelativeAverageSpectralError",
"RelativeSquaredError",
"RetrievalFallOut",
"RetrievalHitRate",
"RetrievalMAP",
"RetrievalMRR",
"RetrievalNormalizedDCG",
"RetrievalPrecision",
"RetrievalRecall",
"RetrievalRPrecision",
"RetrievalPrecisionRecallCurve",
"RetrievalRecallAtFixedPrecision",
"ROC",
"RootMeanSquaredErrorUsingSlidingWindow",
"RunningMean",
"RunningSum",
"SacreBLEUScore",
"SignalDistortionRatio",
"ScaleInvariantSignalDistortionRatio",
"ScaleInvariantSignalNoiseRatio",
"SignalNoiseRatio",
"SpearmanCorrCoef",
"Specificity",
"SpecificityAtSensitivity",
"SpectralAngleMapper",
"SpectralDistortionIndex",
"SQuAD",
"StructuralSimilarityIndexMeasure",
"StatScores",
"SumMetric",
"SymmetricMeanAbsolutePercentageError",
"TheilsU",
"TotalVariation",
"TranslationEditRate",
"TschuprowsT",
"UniversalImageQualityIndex",
"WeightedMeanAbsolutePercentageError",
"WordErrorRate",
"WordInfoLost",
"WordInfoPreserved",
]
# ---- public_repos/torchmetrics/src/torchmetrics/utilities/prints.py ----
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from functools import partial, wraps
from typing import Any, Callable
from torchmetrics import _logger as log
def rank_zero_only(fn: Callable) -> Callable:
"""Call a function only on rank 0 in distributed settings.
Meant to be used as an decorator.
"""
@wraps(fn)
def wrapped_fn(*args: Any, **kwargs: Any) -> Any:
if rank_zero_only.rank == 0:
return fn(*args, **kwargs)
return None
return wrapped_fn
# add the attribute to the function but don't overwrite in case Trainer has already set it
rank_zero_only.rank = getattr(rank_zero_only, "rank", int(os.environ.get("LOCAL_RANK", 0)))
def _warn(*args: Any, **kwargs: Any) -> None:
warnings.warn(*args, **kwargs) # noqa: B028
def _info(*args: Any, **kwargs: Any) -> None:
log.info(*args, **kwargs)
def _debug(*args: Any, **kwargs: Any) -> None:
log.debug(*args, **kwargs)
rank_zero_debug = rank_zero_only(_debug)
rank_zero_info = rank_zero_only(_info)
rank_zero_warn = rank_zero_only(_warn)
_future_warning = partial(warnings.warn, category=FutureWarning)
def _deprecated_root_import_class(name: str, domain: str) -> None:
"""Warn user that he is importing class from location it has been deprecated."""
_future_warning(
f"Importing `{name}` from `torchmetrics` was deprecated and will be removed in 2.0."
f" Import `{name}` from `torchmetrics.{domain}` instead."
)
def _deprecated_root_import_func(name: str, domain: str) -> None:
"""Warn user that he is importing function from location it has been deprecated."""
_future_warning(
f"Importing `{name}` from `torchmetrics.functional` was deprecated and will be removed in 2.0."
f" Import `{name}` from `torchmetrics.{domain}` instead."
)
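# Editor's usage sketch (not part of the upstream module): roughly how the
# deprecation helpers above are wired into a deprecated shim class. The class
# and metric names here are purely illustrative, not real torchmetrics classes.
class _ExampleDeprecatedShim:
    """Hypothetical shim that warns on construction and otherwise behaves like the real class."""

    def __init__(self) -> None:
        # Emits: "Importing `ExampleMetric` from `torchmetrics` was deprecated ..."
        _deprecated_root_import_class("ExampleMetric", "classification")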
# ---- public_repos/torchmetrics/src/torchmetrics/utilities/checks.py ----
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import multiprocessing
import os
from functools import partial
from time import perf_counter
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Tuple, no_type_check
from unittest.mock import Mock
import torch
from torch import Tensor
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import select_topk, to_onehot
from torchmetrics.utilities.enums import DataType
_DOCTEST_DOWNLOAD_TIMEOUT = int(os.environ.get("DOCTEST_DOWNLOAD_TIMEOUT", 120))
_SKIP_SLOW_DOCTEST = bool(os.environ.get("SKIP_SLOW_DOCTEST", 0))
def _check_for_empty_tensors(preds: Tensor, target: Tensor) -> bool:
if preds.numel() == target.numel() == 0:
return True
return False
def _check_same_shape(preds: Tensor, target: Tensor) -> None:
"""Check that predictions and target have the same shape, else raise error."""
if preds.shape != target.shape:
raise RuntimeError(
f"Predictions and targets are expected to have the same shape, but got {preds.shape} and {target.shape}."
)
def _basic_input_validation(
preds: Tensor, target: Tensor, threshold: float, multiclass: Optional[bool], ignore_index: Optional[int]
) -> None:
"""Perform basic validation of inputs that does not require deducing any information of the type of inputs."""
# Skip all other checks if both preds and target are empty tensors
if _check_for_empty_tensors(preds, target):
return
if target.is_floating_point():
raise ValueError("The `target` has to be an integer tensor.")
if (ignore_index is None and target.min() < 0) or (ignore_index and ignore_index >= 0 and target.min() < 0):
raise ValueError("The `target` has to be a non-negative tensor.")
preds_float = preds.is_floating_point()
if not preds_float and preds.min() < 0:
raise ValueError("If `preds` are integers, they have to be non-negative.")
if not preds.shape[0] == target.shape[0]:
raise ValueError("The `preds` and `target` should have the same first dimension.")
if multiclass is False and target.max() > 1:
raise ValueError("If you set `multiclass=False`, then `target` should not exceed 1.")
if multiclass is False and not preds_float and preds.max() > 1:
raise ValueError("If you set `multiclass=False` and `preds` are integers, then `preds` should not exceed 1.")
def _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> Tuple[DataType, int]:
"""Check that the shape and type of inputs are consistent with each other.
    The input types need to be one of the allowed input types (see the docstring of
    ``_input_format_classification``). It does not check for consistency of the number of classes; other functions take
care of that.
It returns the name of the case in which the inputs fall, and the implied number of classes (from the ``C`` dim for
multi-class data, or extra dim(s) for multi-label data).
"""
preds_float = preds.is_floating_point()
if preds.ndim == target.ndim:
if preds.shape != target.shape:
            raise ValueError(
                "The `preds` and `target` should have the same shape,"
                f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}."
            )
if preds_float and target.numel() > 0 and target.max() > 1:
raise ValueError(
"If `preds` and `target` are of shape (N, ...) and `preds` are floats, `target` should be binary."
)
# Get the case
if preds.ndim == 1 and preds_float:
case = DataType.BINARY
elif preds.ndim == 1 and not preds_float:
case = DataType.MULTICLASS
elif preds.ndim > 1 and preds_float:
case = DataType.MULTILABEL
else:
case = DataType.MULTIDIM_MULTICLASS
implied_classes = preds[0].numel() if preds.numel() > 0 else 0
elif preds.ndim == target.ndim + 1:
if not preds_float:
raise ValueError("If `preds` have one dimension more than `target`, `preds` should be a float tensor.")
if preds.shape[2:] != target.shape[1:]:
raise ValueError(
"If `preds` have one dimension more than `target`, the shape of `preds` should be"
" (N, C, ...), and the shape of `target` should be (N, ...)."
)
implied_classes = preds.shape[1] if preds.numel() > 0 else 0
case = DataType.MULTICLASS if preds.ndim == 2 else DataType.MULTIDIM_MULTICLASS
else:
raise ValueError(
"Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)"
" and `preds` should be (N, C, ...)."
)
return case, implied_classes
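# Editor's sketch (not part of the upstream module): a quick illustration of how
# the case detection above classifies two common input layouts.
def _example_case_detection() -> None:
    """Show `_check_shape_and_type_consistency` on binary and multi-class inputs."""
    # (N,) float preds vs. (N,) binary integer targets -> binary case, 1 implied class
    case, implied = _check_shape_and_type_consistency(torch.rand(4), torch.randint(2, (4,)))
    assert case == DataType.BINARY and implied == 1
    # (N, C) float preds vs. (N,) integer targets -> multi-class case, C implied classes
    case, implied = _check_shape_and_type_consistency(torch.rand(4, 3), torch.randint(3, (4,)))
    assert case == DataType.MULTICLASS and implied == 3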
def _check_num_classes_binary(num_classes: int, multiclass: Optional[bool]) -> None:
"""Check that the consistency of `num_classes` with the data and `multiclass` param for binary data."""
if num_classes > 2:
raise ValueError("Your data is binary, but `num_classes` is larger than 2.")
if num_classes == 2 and not multiclass:
raise ValueError(
"Your data is binary and `num_classes=2`, but `multiclass` is not True."
" Set it to True if you want to transform binary data to multi-class format."
)
if num_classes == 1 and multiclass:
raise ValueError(
"You have binary data and have set `multiclass=True`, but `num_classes` is 1."
" Either set `multiclass=None`(default) or set `num_classes=2`"
" to transform binary data to multi-class format."
)
def _check_num_classes_mc(
preds: Tensor,
target: Tensor,
num_classes: int,
multiclass: Optional[bool],
implied_classes: int,
) -> None:
"""Check consistency of `num_classes`, data and `multiclass` param for (multi-dimensional) multi-class data."""
if num_classes == 1 and multiclass is not False:
raise ValueError(
"You have set `num_classes=1`, but predictions are integers."
" If you want to convert (multi-dimensional) multi-class data with 2 classes"
" to binary/multi-label, set `multiclass=False`."
)
if num_classes > 1:
if multiclass is False and implied_classes != num_classes:
raise ValueError(
"You have set `multiclass=False`, but the implied number of classes "
" (from shape of inputs) does not match `num_classes`. If you are trying to"
" transform multi-dim multi-class data with 2 classes to multi-label, `num_classes`"
" should be either None or the product of the size of extra dimensions (...)."
" See Input Types in Metrics documentation."
)
if target.numel() > 0 and num_classes <= target.max():
raise ValueError("The highest label in `target` should be smaller than `num_classes`.")
if preds.shape != target.shape and num_classes != implied_classes:
raise ValueError("The size of C dimension of `preds` does not match `num_classes`.")
def _check_num_classes_ml(num_classes: int, multiclass: Optional[bool], implied_classes: int) -> None:
"""Check that the consistency of ``num_classes`` with the data and ``multiclass`` param for multi-label data."""
if multiclass and num_classes != 2:
raise ValueError(
"Your have set `multiclass=True`, but `num_classes` is not equal to 2."
" If you are trying to transform multi-label data to 2 class multi-dimensional"
" multi-class, you should set `num_classes` to either 2 or None."
)
if not multiclass and num_classes != implied_classes:
raise ValueError("The implied number of classes (from shape of inputs) does not match num_classes.")
def _check_top_k(top_k: int, case: str, implied_classes: int, multiclass: Optional[bool], preds_float: bool) -> None:
if case == DataType.BINARY:
raise ValueError("You can not use `top_k` parameter with binary data.")
if not isinstance(top_k, int) or top_k <= 0:
raise ValueError("The `top_k` has to be an integer larger than 0.")
if not preds_float:
raise ValueError("You have set `top_k`, but you do not have probability predictions.")
if multiclass is False:
raise ValueError("If you set `multiclass=False`, you can not set `top_k`.")
if case == DataType.MULTILABEL and multiclass:
raise ValueError(
"If you want to transform multi-label data to 2 class multi-dimensional"
"multi-class data using `multiclass=True`, you can not use `top_k`."
)
if top_k >= implied_classes:
raise ValueError("The `top_k` has to be strictly smaller than the `C` dimension of `preds`.")
def _check_classification_inputs(
preds: Tensor,
target: Tensor,
threshold: float,
num_classes: Optional[int],
multiclass: Optional[bool],
top_k: Optional[int],
ignore_index: Optional[int] = None,
) -> DataType:
"""Perform error checking on inputs for classification.
This ensures that preds and target take one of the shape/type combinations that are
specified in ``_input_format_classification`` docstring. It also checks the cases of
    overrides with ``multiclass`` by checking (for multi-class and multi-dim multi-class
cases) that there are only up to 2 distinct labels.
In case where preds are floats (probabilities), it is checked whether they are in ``[0,1]`` interval.
When ``num_classes`` is given, it is checked that it is consistent with input cases (binary,
multi-label, ...), and that, if available, the implied number of classes in the ``C``
dimension is consistent with it (as well as that max label in target is smaller than it).
When ``num_classes`` is not specified in these cases, consistency of the highest target
value against ``C`` dimension is checked for (multi-dimensional) multi-class cases.
If ``top_k`` is set (not None) for inputs that do not have probability predictions (and
are not binary), an error is raised. Similarly, if ``top_k`` is set to a number that
is higher than or equal to the ``C`` dimension of ``preds``, an error is raised.
Preds and target tensors are expected to be squeezed already - all dimensions should be
greater than 1, except perhaps the first one (``N``).
Args:
preds: Tensor with predictions (labels or probabilities)
target: Tensor with ground truth labels, always integers (labels)
threshold:
Threshold value for transforming probability/logit predictions to binary
(0,1) predictions, in the case of binary or multi-label inputs.
num_classes:
Number of classes. If not explicitly set, the number of classes will be inferred
either from the shape of inputs, or the maximum label in the ``target`` and ``preds``
tensor, where applicable.
top_k:
Number of the highest probability entries for each sample to convert to 1s - relevant
only for inputs with probability predictions. The default value (``None``) will be
interpreted as 1 for these inputs. If this parameter is set for multi-label inputs,
it will take precedence over threshold.
Should be left unset (``None``) for inputs with label predictions.
multiclass:
Used only in certain special cases, where you want to treat inputs as a different type
than what they appear to be. See the parameter's
:ref:`documentation section <pages/overview:using the multiclass parameter>`
for a more detailed explanation and examples.
ignore_index: ignore predictions where targets are equal to this number
Return:
case: The case the inputs fall in, one of 'binary', 'multi-class', 'multi-label' or
'multi-dim multi-class'
"""
# Basic validation (that does not need case/type information)
_basic_input_validation(preds, target, threshold, multiclass, ignore_index)
# Check that shape/types fall into one of the cases
case, implied_classes = _check_shape_and_type_consistency(preds, target)
# Check consistency with the `C` dimension in case of multi-class data
if preds.shape != target.shape:
if multiclass is False and implied_classes != 2:
raise ValueError(
"You have set `multiclass=False`, but have more than 2 classes in your data,"
" based on the C dimension of `preds`."
)
if target.max() >= implied_classes:
raise ValueError(
"The highest label in `target` should be smaller than the size of the `C` dimension of `preds`."
)
# Check that num_classes is consistent
if num_classes:
if case == DataType.BINARY:
_check_num_classes_binary(num_classes, multiclass)
elif case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS):
_check_num_classes_mc(preds, target, num_classes, multiclass, implied_classes)
        elif case == DataType.MULTILABEL:
_check_num_classes_ml(num_classes, multiclass, implied_classes)
# Check that top_k is consistent
if top_k is not None:
_check_top_k(top_k, case, implied_classes, multiclass, preds.is_floating_point())
return case
def _input_squeeze(
preds: Tensor,
target: Tensor,
) -> Tuple[Tensor, Tensor]:
"""Remove excess dimensions."""
if preds.shape[0] == 1:
preds, target = preds.squeeze().unsqueeze(0), target.squeeze().unsqueeze(0)
else:
preds, target = preds.squeeze(), target.squeeze()
return preds, target
def _input_format_classification(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
top_k: Optional[int] = None,
num_classes: Optional[int] = None,
multiclass: Optional[bool] = None,
ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor, DataType]:
"""Convert preds and target tensors into common format.
Preds and targets are supposed to fall into one of these categories (and are
validated to make sure this is the case):
* Both preds and target are of shape ``(N,)``, and both are integers (multi-class)
* Both preds and target are of shape ``(N,)``, and target is binary, while preds
are a float (binary)
* preds are of shape ``(N, C)`` and are floats, and target is of shape ``(N,)`` and
is integer (multi-class)
* preds and target are of shape ``(N, ...)``, target is binary and preds is a float
(multi-label)
* preds are of shape ``(N, C, ...)`` and are floats, target is of shape ``(N, ...)``
and is integer (multi-dimensional multi-class)
* preds and target are of shape ``(N, ...)`` both are integers (multi-dimensional
multi-class)
To avoid ambiguities, all dimensions of size 1, except the first one, are squeezed out.
The returned output tensors will be binary tensors of the same shape, either ``(N, C)``
    or ``(N, C, X)``; the details for each case are described below. The function also returns
a ``case`` string, which describes which of the above cases the inputs belonged to - regardless
of whether this was "overridden" by other settings (like ``multiclass``).
In binary case, targets are normally returned as ``(N,1)`` tensor, while preds are transformed
into a binary tensor (elements become 1 if the probability is greater than or equal to
``threshold`` or 0 otherwise). If ``multiclass=True``, then both targets are preds
become ``(N, 2)`` tensors by a one-hot transformation; with the thresholding being applied to
preds first.
In multi-class case, normally both preds and targets become ``(N, C)`` binary tensors; targets
by a one-hot transformation and preds by selecting ``top_k`` largest entries (if their original
shape was ``(N,C)``). However, if ``multiclass=False``, then targets and preds will be
returned as ``(N,1)`` tensor.
In multi-label case, normally targets and preds are returned as ``(N, C)`` binary tensors, with
preds being binarized as in the binary case. Here the ``C`` dimension is obtained by flattening
all dimensions after the first one. However, if ``multiclass=True``, then both are returned as
``(N, 2, C)``, by an equivalent transformation as in the binary case.
In multi-dimensional multi-class case, normally both target and preds are returned as
``(N, C, X)`` tensors, with ``X`` resulting from flattening of all dimensions except ``N`` and
``C``. The transformations performed here are equivalent to the multi-class case. However, if
``multiclass=False`` (and there are up to two classes), then the data is returned as
``(N, X)`` binary tensors (multi-label).
Note:
Where a one-hot transformation needs to be performed and the number of classes
is not implicitly given by a ``C`` dimension, the new ``C`` dimension will either be
equal to ``num_classes``, if it is given, or the maximum label value in preds and
target.
Args:
preds: Tensor with predictions (labels or probabilities)
target: Tensor with ground truth labels, always integers (labels)
threshold:
Threshold value for transforming probability/logit predictions to binary
(0 or 1) predictions, in the case of binary or multi-label inputs.
num_classes:
Number of classes. If not explicitly set, the number of classes will be inferred
either from the shape of inputs, or the maximum label in the ``target`` and ``preds``
tensor, where applicable.
top_k:
Number of the highest probability entries for each sample to convert to 1s - relevant
only for (multi-dimensional) multi-class inputs with probability predictions. The
default value (``None``) will be interpreted as 1 for these inputs.
Should be left unset (``None``) for all other types of inputs.
multiclass:
Used only in certain special cases, where you want to treat inputs as a different type
than what they appear to be. See the parameter's
:ref:`documentation section <pages/overview:using the multiclass parameter>`
for a more detailed explanation and examples.
ignore_index: ignore predictions where targets are equal to this number
Returns:
preds: binary tensor of shape ``(N, C)`` or ``(N, C, X)``
target: binary tensor of shape ``(N, C)`` or ``(N, C, X)``
case: The case the inputs fall in, one of ``'binary'``, ``'multi-class'``, ``'multi-label'`` or
``'multi-dim multi-class'``
"""
# Remove excess dimensions
preds, target = _input_squeeze(preds, target)
# Convert half precision tensors to full precision, as not all ops are supported
# for example, min() is not supported
if preds.dtype == torch.float16:
preds = preds.float()
case = _check_classification_inputs(
preds,
target,
threshold=threshold,
num_classes=num_classes,
multiclass=multiclass,
top_k=top_k,
ignore_index=ignore_index,
)
if case in (DataType.BINARY, DataType.MULTILABEL) and not top_k:
preds = (preds >= threshold).int()
num_classes = num_classes if not multiclass else 2
if case == DataType.MULTILABEL and top_k:
preds = select_topk(preds, top_k)
if case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS) or multiclass:
if preds.is_floating_point():
num_classes = preds.shape[1]
preds = select_topk(preds, top_k or 1)
else:
num_classes = num_classes or int(max(preds.max().item(), target.max().item()) + 1)
preds = to_onehot(preds, max(2, num_classes))
target = to_onehot(target, max(2, num_classes))
if multiclass is False:
preds, target = preds[:, 1, ...], target[:, 1, ...]
if not _check_for_empty_tensors(preds, target):
if (case in (DataType.MULTICLASS, DataType.MULTIDIM_MULTICLASS) and multiclass is not False) or multiclass:
target = target.reshape(target.shape[0], target.shape[1], -1)
preds = preds.reshape(preds.shape[0], preds.shape[1], -1)
else:
target = target.reshape(target.shape[0], -1)
preds = preds.reshape(preds.shape[0], -1)
# Some operations above create an extra dimension for MC/binary case - this removes it
if preds.ndim > 2:
preds, target = preds.squeeze(-1), target.squeeze(-1)
return preds.int(), target.int(), case
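# Editor's sketch (not part of the upstream module): what the common-format
# conversion above produces for plain binary inputs.
def _example_input_format_classification() -> None:
    """Show that binary probability inputs are thresholded and reshaped to (N, 1)."""
    preds = torch.tensor([0.2, 0.7, 0.9, 0.4])
    target = torch.tensor([0, 1, 1, 0])
    fmt_preds, fmt_target, case = _input_format_classification(preds, target, threshold=0.5)
    assert case == DataType.BINARY
    assert fmt_preds.shape == fmt_target.shape == (4, 1)
    assert fmt_preds.flatten().tolist() == [0, 1, 1, 0]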
def _input_format_classification_one_hot(
num_classes: int,
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
multilabel: bool = False,
) -> Tuple[Tensor, Tensor]:
"""Convert preds and target tensors into one hot spare label tensors.
Args:
num_classes: number of classes
preds: either tensor with labels, tensor with probabilities/logits or multilabel tensor
        target: tensor with ground-truth labels
threshold: float used for thresholding multilabel input
multilabel: boolean flag indicating if input is multilabel
Raises:
ValueError:
If ``preds`` and ``target`` don't have the same number of dimensions
or one additional dimension for ``preds``.
Returns:
preds: one hot tensor of shape [num_classes, -1] with predicted labels
target: one hot tensors of shape [num_classes, -1] with true labels
"""
if preds.ndim not in (target.ndim, target.ndim + 1):
raise ValueError("preds and target must have same number of dimensions, or one additional dimension for preds")
if preds.ndim == target.ndim + 1:
# multi class probabilities
preds = torch.argmax(preds, dim=1)
if preds.ndim == target.ndim and preds.dtype in (torch.long, torch.int) and num_classes > 1 and not multilabel:
# multi-class
preds = to_onehot(preds, num_classes=num_classes)
target = to_onehot(target, num_classes=num_classes)
elif preds.ndim == target.ndim and preds.is_floating_point():
# binary or multilabel probabilities
preds = (preds >= threshold).long()
# transpose class as first dim and reshape
if preds.ndim > 1:
preds = preds.transpose(1, 0)
target = target.transpose(1, 0)
return preds.reshape(num_classes, -1), target.reshape(num_classes, -1)
def _check_retrieval_functional_inputs(
preds: Tensor,
target: Tensor,
allow_non_binary_target: bool = False,
) -> Tuple[Tensor, Tensor]:
"""Check ``preds`` and ``target`` tensors are of the same shape and of the correct data type.
Args:
        preds: tensor with scores/logits
        target: tensor with ground-truth labels
allow_non_binary_target: whether to allow target to contain non-binary values
Raises:
ValueError:
If ``preds`` and ``target`` don't have the same shape, if they are empty
or not of the correct ``dtypes``.
Returns:
preds: as torch.float32
target: as torch.long if not floating point else torch.float32
"""
if preds.shape != target.shape:
raise ValueError("`preds` and `target` must be of the same shape")
if not preds.numel() or not preds.size():
raise ValueError("`preds` and `target` must be non-empty and non-scalar tensors")
return _check_retrieval_target_and_prediction_types(preds, target, allow_non_binary_target=allow_non_binary_target)
def _check_retrieval_inputs(
indexes: Tensor,
preds: Tensor,
target: Tensor,
allow_non_binary_target: bool = False,
ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Check ``indexes``, ``preds`` and ``target`` tensors are of the same shape and of the correct data type.
Args:
indexes: tensor with queries indexes
preds: tensor with scores/logits
        target: tensor with ground-truth labels
allow_non_binary_target: whether to allow target to contain non-binary values
ignore_index: ignore predictions where targets are equal to this number
Raises:
ValueError:
If ``preds`` and ``target`` don't have the same shape, if they are empty or not of the correct ``dtypes``.
Returns:
indexes: as ``torch.long``
preds: as ``torch.float32``
target: as ``torch.long``
"""
if indexes.shape != preds.shape or preds.shape != target.shape:
raise ValueError("`indexes`, `preds` and `target` must be of the same shape")
if indexes.dtype is not torch.long:
raise ValueError("`indexes` must be a tensor of long integers")
# remove predictions where target is equal to `ignore_index`
if ignore_index is not None:
valid_positions = target != ignore_index
indexes, preds, target = indexes[valid_positions], preds[valid_positions], target[valid_positions]
if not indexes.numel() or not indexes.size():
raise ValueError(
"`indexes`, `preds` and `target` must be non-empty and non-scalar tensors",
)
preds, target = _check_retrieval_target_and_prediction_types(
preds, target, allow_non_binary_target=allow_non_binary_target
)
return indexes.long().flatten(), preds, target
def _check_retrieval_target_and_prediction_types(
preds: Tensor,
target: Tensor,
allow_non_binary_target: bool = False,
) -> Tuple[Tensor, Tensor]:
"""Check ``preds`` and ``target`` tensors are of the same shape and of the correct data type.
Args:
        preds: tensor with scores/logits
        target: tensor with ground-truth labels
allow_non_binary_target: whether to allow target to contain non-binary values
Raises:
ValueError:
If ``preds`` and ``target`` don't have the same shape, if they are empty or not of the correct ``dtypes``.
"""
if target.dtype not in (torch.bool, torch.long, torch.int) and not torch.is_floating_point(target):
raise ValueError("`target` must be a tensor of booleans, integers or floats")
if not preds.is_floating_point():
raise ValueError("`preds` must be a tensor of floats")
if not allow_non_binary_target and (target.max() > 1 or target.min() < 0):
raise ValueError("`target` must contain `binary` values")
target = target.float() if target.is_floating_point() else target.long()
preds = preds.float()
return preds.flatten(), target.flatten()
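# Editor's sketch (not part of the upstream module): the retrieval validators
# above flatten the inputs and normalize dtypes; a small end-to-end check.
def _example_retrieval_checks() -> None:
    """Show `_check_retrieval_inputs` on a tiny two-query batch."""
    indexes = torch.tensor([[0, 0], [1, 1]])
    preds = torch.tensor([[0.9, 0.2], [0.4, 0.7]])
    target = torch.tensor([[1, 0], [0, 1]])
    idx, p, t = _check_retrieval_inputs(indexes, preds, target)
    assert idx.dtype == torch.long and p.dtype == torch.float32 and t.dtype == torch.long
    assert idx.shape == p.shape == t.shape == (4,)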
def _allclose_recursive(res1: Any, res2: Any, atol: float = 1e-6) -> bool:
"""Recursively asserting that two results are within a certain tolerance."""
# single output compare
if isinstance(res1, Tensor):
return torch.allclose(res1, res2, atol=atol)
if isinstance(res1, str):
return res1 == res2
if isinstance(res1, Sequence):
return all(_allclose_recursive(r1, r2) for r1, r2 in zip(res1, res2))
if isinstance(res1, Mapping):
return all(_allclose_recursive(res1[k], res2[k]) for k in res1)
return res1 == res2
@no_type_check
def check_forward_full_state_property(
metric_class: Metric,
init_args: Optional[Dict[str, Any]] = None,
input_args: Optional[Dict[str, Any]] = None,
num_update_to_compare: Sequence[int] = [10, 100, 1000],
reps: int = 5,
) -> None:
"""Check if the new ``full_state_update`` property works as intended.
This function checks if the property can safely be set to ``False`` which will for most metrics results in a
speedup when using ``forward``.
Args:
metric_class: metric class object that should be checked
init_args: dict containing arguments for initializing the metric class
input_args: dict containing arguments to pass to ``forward``
num_update_to_compare: if we successfully detect that the flag is safe to set to ``False``
we will run some speedup test. This arg should be a list of integers for how many
steps to compare over.
reps: number of repetitions of speedup test
    Example (states in ``update`` are independent, safe to set ``full_state_update=False``):
>>> from torchmetrics.classification import MulticlassConfusionMatrix
>>> check_forward_full_state_property( # doctest: +SKIP
... MulticlassConfusionMatrix,
... init_args = {'num_classes': 3},
... input_args = {'preds': torch.randint(3, (100,)), 'target': torch.randint(3, (100,))},
... )
Full state for 10 steps took: ...
Partial state for 10 steps took: ...
Full state for 100 steps took: ...
Partial state for 100 steps took: ...
Full state for 1000 steps took: ...
Partial state for 1000 steps took: ...
Recommended setting `full_state_update=False`
Example (states in ``update`` are dependent meaning that ``full_state_update=True``):
>>> from torchmetrics.classification import MulticlassConfusionMatrix
>>> class MyMetric(MulticlassConfusionMatrix):
... def update(self, preds, target):
... super().update(preds, target)
... # by construction make future states dependent on prior states
... if self.confmat.sum() > 20:
... self.reset()
>>> check_forward_full_state_property(
... MyMetric,
... init_args = {'num_classes': 3},
... input_args = {'preds': torch.randint(3, (10,)), 'target': torch.randint(3, (10,))},
... )
Recommended setting `full_state_update=True`
"""
init_args = init_args or {}
input_args = input_args or {}
class FullState(metric_class):
full_state_update = True
class PartState(metric_class):
full_state_update = False
fullstate = FullState(**init_args)
partstate = PartState(**init_args)
equal = True
try: # if it fails, the code most likely need access to the full state
for _ in range(num_update_to_compare[0]):
equal = equal & _allclose_recursive(fullstate(**input_args), partstate(**input_args))
except RuntimeError:
equal = False
res1 = fullstate.compute()
try: # if it fails, the code most likely need access to the full state
res2 = partstate.compute()
except RuntimeError:
equal = False
equal = equal & _allclose_recursive(res1, res2)
if not equal: # we can stop early because the results did not match
print("Recommended setting `full_state_update=True`")
return
# Do timings
res = torch.zeros(2, len(num_update_to_compare), reps)
for i, metric in enumerate([fullstate, partstate]):
for j, t in enumerate(num_update_to_compare):
for r in range(reps):
start = perf_counter()
for _ in range(t):
_ = metric(**input_args)
end = perf_counter()
res[i, j, r] = end - start
metric.reset()
mean = torch.mean(res, -1)
std = torch.std(res, -1)
for t in range(len(num_update_to_compare)):
print(f"Full state for {num_update_to_compare[t]} steps took: {mean[0, t]}+-{std[0, t]:0.3f}")
print(f"Partial state for {num_update_to_compare[t]} steps took: {mean[1, t]:0.3f}+-{std[1, t]:0.3f}")
faster = (mean[1, -1] < mean[0, -1]).item() # if faster on average, we recommend upgrading
print(f"Recommended setting `full_state_update={not faster}`")
return
def is_overridden(method_name: str, instance: object, parent: object) -> bool:
"""Check if a method has been overridden by an instance compared to its parent class."""
instance_attr = getattr(instance, method_name, None)
if instance_attr is None:
return False
# `functools.wraps()` support
if hasattr(instance_attr, "__wrapped__"):
instance_attr = instance_attr.__wrapped__
# `Mock(wraps=...)` support
if isinstance(instance_attr, Mock):
# access the wrapped function
instance_attr = instance_attr._mock_wraps
# `partial` support
elif isinstance(instance_attr, partial):
instance_attr = instance_attr.func
if instance_attr is None:
return False
parent_attr = getattr(parent, method_name, None)
if parent_attr is None:
raise ValueError("The parent should define the method")
return instance_attr.__code__ != parent_attr.__code__
def _try_proceed_with_timeout(fn: Callable, timeout: int = _DOCTEST_DOWNLOAD_TIMEOUT) -> bool:
"""Check if a certain function is taking too long to execute.
Function will only be executed if running inside a doctest context. Currently does not support Windows.
Args:
fn: function to check
timeout: timeout for function
Returns:
Bool indicating if the function finished within the specified timeout
"""
# source: https://stackoverflow.com/a/14924210/4521646
proc = multiprocessing.Process(target=fn)
logging.debug(f"try to run `{fn.__name__}` for {timeout}s...")
proc.start()
# Wait for N seconds or until process finishes
proc.join(timeout)
# If thread is still active
if not proc.is_alive():
return True
logging.warning(f"running `{fn.__name__}`... let's kill it...")
# Terminate - may not work if process is stuck for good
proc.terminate()
# OR Kill - will work for sure, no chance for process to finish nicely however
# p.kill()
proc.join()
return False
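# Editor's sketch (not part of the upstream module): `_try_proceed_with_timeout`
# runs its target in a separate process, so the target should be a picklable,
# module-level callable. The stand-in below is illustrative only.
def _example_fast_download() -> None:
    """Stand-in for a download callback that returns immediately."""
def _example_timeout_usage() -> bool:
    """Return True, since the stand-in finishes well within the timeout."""
    return _try_proceed_with_timeout(_example_fast_download, timeout=5)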
# ---- public_repos/torchmetrics/src/torchmetrics/utilities/distributed.py ----
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional
import torch
from torch import Tensor
from torch.nn import functional as F # noqa: N812
from typing_extensions import Literal
def reduce(x: Tensor, reduction: Literal["elementwise_mean", "sum", "none", None]) -> Tensor:
"""Reduces a given tensor by a given reduction method.
Args:
x: the tensor, which shall be reduced
reduction: a string specifying the reduction method ('elementwise_mean', 'none', 'sum')
Return:
reduced Tensor
Raise:
ValueError if an invalid reduction parameter was given
"""
if reduction == "elementwise_mean":
return torch.mean(x)
if reduction == "none" or reduction is None:
return x
if reduction == "sum":
return torch.sum(x)
raise ValueError("Reduction parameter unknown.")
def class_reduce(
num: Tensor,
denom: Tensor,
weights: Tensor,
class_reduction: Literal["micro", "macro", "weighted", "none", None] = "none",
) -> Tensor:
"""Reduce classification metrics of the form ``num / denom * weights``.
For example for calculating standard accuracy the num would be number of true positives per class, denom would be
the support per class, and weights would be a tensor of 1s.
Args:
num: numerator tensor
denom: denominator tensor
weights: weights for each class
class_reduction: reduction method for multiclass problems:
            - ``'micro'``: calculate metrics globally
- ``'macro'``: calculate metrics for each label, and find their unweighted mean.
- ``'weighted'``: calculate metrics for each label, and find their weighted mean.
- ``'none'`` or ``None``: returns calculated metric per class
Raises:
ValueError:
If ``class_reduction`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"none"`` or ``None``.
"""
valid_reduction = ("micro", "macro", "weighted", "none", None)
fraction = torch.sum(num) / torch.sum(denom) if class_reduction == "micro" else num / denom
# We need to take care of instances where the denom can be 0
# for some (or all) classes which will produce nans
fraction[fraction != fraction] = 0
if class_reduction == "micro":
return fraction
if class_reduction == "macro":
return torch.mean(fraction)
if class_reduction == "weighted":
return torch.sum(fraction * (weights.float() / torch.sum(weights)))
if class_reduction == "none" or class_reduction is None:
return fraction
raise ValueError(f"Reduction parameter {class_reduction} unknown. Choose between one of these: {valid_reduction}")
def _simple_gather_all_tensors(result: Tensor, group: Any, world_size: int) -> List[Tensor]:
gathered_result = [torch.zeros_like(result) for _ in range(world_size)]
torch.distributed.all_gather(gathered_result, result, group)
return gathered_result
def gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]:
"""Gather all tensors from several ddp processes onto a list that is broadcasted to all processes.
Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case
tensors are padded, gathered and then trimmed to secure equal workload for all processes.
Args:
result: the value to sync
group: the process group to gather results from. Defaults to all processes (world)
Return:
list with size equal to the process group where element i corresponds to result tensor from process i
"""
if group is None:
group = torch.distributed.group.WORLD
# convert tensors to contiguous format
result = result.contiguous()
world_size = torch.distributed.get_world_size(group)
torch.distributed.barrier(group=group)
# if the tensor is scalar, things are easy
if result.ndim == 0:
return _simple_gather_all_tensors(result, group, world_size)
# 1. Gather sizes of all tensors
local_size = torch.tensor(result.shape, device=result.device)
local_sizes = [torch.zeros_like(local_size) for _ in range(world_size)]
torch.distributed.all_gather(local_sizes, local_size, group=group)
max_size = torch.stack(local_sizes).max(dim=0).values
all_sizes_equal = all(all(ls == max_size) for ls in local_sizes)
# 2. If shapes are all the same, then do a simple gather:
if all_sizes_equal:
return _simple_gather_all_tensors(result, group, world_size)
# 3. If not, we need to pad each local tensor to maximum size, gather and then truncate
pad_dims = []
pad_by = (max_size - local_size).detach().cpu()
for val in reversed(pad_by):
pad_dims.append(0)
pad_dims.append(val.item())
result_padded = F.pad(result, pad_dims)
gathered_result = [torch.zeros_like(result_padded) for _ in range(world_size)]
torch.distributed.all_gather(gathered_result, result_padded, group)
for idx, item_size in enumerate(local_sizes):
slice_param = [slice(dim_size) for dim_size in item_size]
gathered_result[idx] = gathered_result[idx][slice_param]
return gathered_result
# ---- public_repos/torchmetrics/src/torchmetrics/utilities/enums.py ----
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Type
from lightning_utilities.core.enums import StrEnum
from typing_extensions import Literal
class EnumStr(StrEnum):
"""Base Enum."""
@staticmethod
def _name() -> str:
return "Task"
@classmethod
def from_str(cls: Type["EnumStr"], value: str, source: Literal["key", "value", "any"] = "key") -> "EnumStr":
"""Load from string.
Raises:
ValueError:
If required value is not among the supported options.
>>> class MyEnum(EnumStr):
... a = "aaa"
... b = "bbb"
>>> MyEnum.from_str("a")
<MyEnum.a: 'aaa'>
>>> MyEnum.from_str("c")
Traceback (most recent call last):
...
ValueError: Invalid Task: expected one of ['a', 'b'], but got c.
"""
try:
me = super().from_str(value.replace("-", "_"), source=source)
except ValueError as err:
_allowed_im = [m.lower() for m in cls._member_names_]
raise ValueError(
f"Invalid {cls._name()}: expected one of {cls._allowed_matches(source)}, but got {value}."
) from err
return cls(me)
class DataType(EnumStr):
"""Enum to represent data type.
>>> "Binary" in list(DataType)
True
"""
@staticmethod
def _name() -> str:
return "Data type"
BINARY = "binary"
MULTILABEL = "multi-label"
MULTICLASS = "multi-class"
MULTIDIM_MULTICLASS = "multi-dim multi-class"
class AverageMethod(EnumStr):
"""Enum to represent average method.
>>> None in list(AverageMethod)
True
>>> AverageMethod.NONE == None
True
>>> AverageMethod.NONE == 'none'
True
"""
@staticmethod
def _name() -> str:
return "Average method"
MICRO = "micro"
MACRO = "macro"
WEIGHTED = "weighted"
NONE = None
SAMPLES = "samples"
class MDMCAverageMethod(EnumStr):
"""Enum to represent multi-dim multi-class average method."""
@staticmethod
def _name() -> str:
return "MDMC Average method"
GLOBAL = "global"
SAMPLEWISE = "samplewise"
class ClassificationTask(EnumStr):
"""Enum to represent the different tasks in classification metrics.
>>> "binary" in list(ClassificationTask)
True
"""
@staticmethod
def _name() -> str:
return "Classification"
BINARY = "binary"
MULTICLASS = "multiclass"
MULTILABEL = "multilabel"
class ClassificationTaskNoBinary(EnumStr):
"""Enum to represent the different tasks in classification metrics.
>>> "binary" in list(ClassificationTaskNoBinary)
False
"""
@staticmethod
def _name() -> str:
return "Classification"
MULTILABEL = "multilabel"
MULTICLASS = "multiclass"
class ClassificationTaskNoMultilabel(EnumStr):
"""Enum to represent the different tasks in classification metrics.
>>> "multilabel" in list(ClassificationTaskNoMultilabel)
False
"""
@staticmethod
def _name() -> str:
return "Classification"
BINARY = "binary"
MULTICLASS = "multiclass"
# ---- public_repos/torchmetrics/src/torchmetrics/utilities/exceptions.py ----
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TorchMetricsUserError(Exception):
"""Error used to inform users of a wrong combination of Metric API calls."""
class TorchMetricsUserWarning(Warning):
"""Error used to inform users of specific warnings due to the torchmetrics API."""
# ---- public_repos/torchmetrics/src/torchmetrics/utilities/plot.py ----
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import product
from math import ceil, floor, sqrt
from typing import Any, Dict, Generator, List, Optional, Sequence, Tuple, Union, no_type_check
import numpy as np
import torch
from torch import Tensor
from torchmetrics.utilities.imports import _LATEX_AVAILABLE, _MATPLOTLIB_AVAILABLE, _SCIENCEPLOT_AVAILABLE
if _MATPLOTLIB_AVAILABLE:
import matplotlib
import matplotlib.axes
import matplotlib.pyplot as plt
_PLOT_OUT_TYPE = Tuple[plt.Figure, Union[matplotlib.axes.Axes, np.ndarray]]
_AX_TYPE = matplotlib.axes.Axes
style_change = plt.style.context
else:
_PLOT_OUT_TYPE = Tuple[object, object] # type: ignore[misc]
_AX_TYPE = object
from contextlib import contextmanager
@contextmanager
def style_change(*args: Any, **kwargs: Any) -> Generator:
"""No-ops decorator if matplotlib is not installed."""
yield
if _SCIENCEPLOT_AVAILABLE:
import scienceplots # noqa: F401
_style = ["science", "no-latex"]
_style = ["science"] if _SCIENCEPLOT_AVAILABLE and _LATEX_AVAILABLE else ["default"]
def _error_on_missing_matplotlib() -> None:
"""Raise error if matplotlib is not installed."""
if not _MATPLOTLIB_AVAILABLE:
raise ModuleNotFoundError(
"Plot function expects `matplotlib` to be installed. Please install with `pip install matplotlib`"
)
@style_change(_style)
def plot_single_or_multi_val(
val: Union[Tensor, Sequence[Tensor], Dict[str, Tensor], Sequence[Dict[str, Tensor]]],
ax: Optional[_AX_TYPE] = None, # type: ignore[valid-type]
higher_is_better: Optional[bool] = None,
lower_bound: Optional[float] = None,
upper_bound: Optional[float] = None,
legend_name: Optional[str] = None,
name: Optional[str] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single metric value or multiple, including bounds of value if existing.
Args:
val: A single tensor with one or multiple values (multiclass/label/output format) or a list of such tensors.
If a list is provided the values are interpreted as a time series of evolving values.
ax: Axis from a figure.
        higher_is_better: Indicates if a label marking where the optimal value lies should be added to the figure
lower_bound: lower value that the metric can take
upper_bound: upper value that the metric can take
legend_name: for class based metrics specify the legend prefix e.g. Class or Label to use when multiple values
are provided
name: Name of the metric to use for the y-axis label
Returns:
A tuple consisting of the figure and respective ax objects of the generated figure
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
"""
_error_on_missing_matplotlib()
fig, ax = plt.subplots() if ax is None else (None, ax)
ax.get_xaxis().set_visible(False)
if isinstance(val, Tensor):
if val.numel() == 1:
ax.plot([val.detach().cpu()], marker="o", markersize=10)
else:
for i, v in enumerate(val):
label = f"{legend_name} {i}" if legend_name else f"{i}"
ax.plot(i, v.detach().cpu(), marker="o", markersize=10, linestyle="None", label=label)
elif isinstance(val, dict):
for i, (k, v) in enumerate(val.items()):
if v.numel() != 1:
ax.plot(v.detach().cpu(), marker="o", markersize=10, linestyle="-", label=k)
ax.get_xaxis().set_visible(True)
ax.set_xlabel("Step")
ax.set_xticks(torch.arange(len(v)))
else:
ax.plot(i, v.detach().cpu(), marker="o", markersize=10, label=k)
elif isinstance(val, Sequence):
n_steps = len(val)
if isinstance(val[0], dict):
val = {k: torch.stack([val[i][k] for i in range(n_steps)]) for k in val[0]} # type: ignore
for k, v in val.items():
ax.plot(v.detach().cpu(), marker="o", markersize=10, linestyle="-", label=k)
else:
val = torch.stack(val, 0) # type: ignore
multi_series = val.ndim != 1
val = val.T if multi_series else val.unsqueeze(0)
for i, v in enumerate(val):
label = (f"{legend_name} {i}" if legend_name else f"{i}") if multi_series else ""
ax.plot(v.detach().cpu(), marker="o", markersize=10, linestyle="-", label=label)
ax.get_xaxis().set_visible(True)
ax.set_xlabel("Step")
ax.set_xticks(torch.arange(n_steps))
else:
raise ValueError("Got unknown format for argument `val`.")
handles, labels = ax.get_legend_handles_labels()
if handles and labels:
ax.legend(handles, labels, loc="upper center", bbox_to_anchor=(0.5, 1.15), ncol=3, fancybox=True, shadow=True)
ylim = ax.get_ylim()
if lower_bound is not None and upper_bound is not None:
factor = 0.1 * (upper_bound - lower_bound)
else:
factor = 0.1 * (ylim[1] - ylim[0])
ax.set_ylim(
bottom=lower_bound - factor if lower_bound is not None else ylim[0] - factor,
top=upper_bound + factor if upper_bound is not None else ylim[1] + factor,
)
ax.grid(True)
ax.set_ylabel(name if name is not None else None)
xlim = ax.get_xlim()
factor = 0.1 * (xlim[1] - xlim[0])
y_lines = []
if lower_bound is not None:
y_lines.append(lower_bound)
if upper_bound is not None:
y_lines.append(upper_bound)
ax.hlines(y_lines, xlim[0], xlim[1], linestyles="dashed", colors="k")
if higher_is_better is not None:
if lower_bound is not None and not higher_is_better:
ax.set_xlim(xlim[0] - factor, xlim[1])
ax.text(
xlim[0], lower_bound, s="Optimal \n value", horizontalalignment="center", verticalalignment="center"
)
if upper_bound is not None and higher_is_better:
ax.set_xlim(xlim[0] - factor, xlim[1])
ax.text(
xlim[0], upper_bound, s="Optimal \n value", horizontalalignment="center", verticalalignment="center"
)
return fig, ax
def _get_col_row_split(n: int) -> Tuple[int, int]:
"""Split `n` figures into `rows` x `cols` figures."""
nsq = sqrt(n)
if int(nsq) == nsq: # square number
return int(nsq), int(nsq)
if floor(nsq) * ceil(nsq) >= n:
return floor(nsq), ceil(nsq)
return ceil(nsq), ceil(nsq)
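# Editor's sketch (not part of the upstream module): a few concrete grid splits
# produced by the helper above.
def _example_grid_split() -> None:
    """Show `_get_col_row_split` for a square, a near-square and an awkward count."""
    assert _get_col_row_split(4) == (2, 2)  # perfect square
    assert _get_col_row_split(5) == (2, 3)  # floor(sqrt) x ceil(sqrt) already fits
    assert _get_col_row_split(7) == (3, 3)  # otherwise fall back to ceil x ceil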
def trim_axs(axs: Union[_AX_TYPE, np.ndarray], nb: int) -> Union[np.ndarray, _AX_TYPE]: # type: ignore[valid-type]
"""Reduce `axs` to `nb` Axes.
All further Axes are removed from the figure.
"""
if isinstance(axs, _AX_TYPE):
return axs
axs = axs.flat # type: ignore[union-attr]
for ax in axs[nb:]:
ax.remove()
return axs[:nb]
@style_change(_style)
@no_type_check
def plot_confusion_matrix(
confmat: Tensor,
ax: Optional[_AX_TYPE] = None,
add_text: bool = True,
labels: Optional[List[Union[int, str]]] = None,
) -> _PLOT_OUT_TYPE:
"""Plot an confusion matrix.
Inspired by: https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/metrics/_plot/confusion_matrix.py.
Works for both binary, multiclass and multilabel confusion matrices.
Args:
confmat: the confusion matrix. Either should be an [N,N] matrix in the binary and multiclass cases or an
[N, 2, 2] matrix for multilabel classification
ax: Axis from a figure. If not provided, a new figure and axis will be created
add_text: if text should be added to each cell with the given value
labels: labels to add the x- and y-axis
Returns:
A tuple consisting of the figure and respective ax objects (or array of ax objects) of the generated figure
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
"""
_error_on_missing_matplotlib()
if confmat.ndim == 3: # multilabel
nb, n_classes = confmat.shape[0], 2
rows, cols = _get_col_row_split(nb)
else:
nb, n_classes, rows, cols = 1, confmat.shape[0], 1, 1
if labels is not None and confmat.ndim != 3 and len(labels) != n_classes:
raise ValueError(
"Expected number of elements in arg `labels` to match number of labels in confmat but "
f"got {len(labels)} and {n_classes}"
)
if confmat.ndim == 3:
fig_label = labels or np.arange(nb)
labels = list(map(str, range(n_classes)))
else:
fig_label = None
labels = labels or np.arange(n_classes).tolist()
fig, axs = plt.subplots(nrows=rows, ncols=cols) if ax is None else (ax.get_figure(), ax)
axs = trim_axs(axs, nb)
for i in range(nb):
ax = axs[i] if rows != 1 and cols != 1 else axs
if fig_label is not None:
ax.set_title(f"Label {fig_label[i]}", fontsize=15)
ax.imshow(confmat[i].cpu().detach() if confmat.ndim == 3 else confmat.cpu().detach())
ax.set_xlabel("Predicted class", fontsize=15)
ax.set_ylabel("True class", fontsize=15)
ax.set_xticks(list(range(n_classes)))
ax.set_yticks(list(range(n_classes)))
ax.set_xticklabels(labels, rotation=45, fontsize=10)
ax.set_yticklabels(labels, rotation=25, fontsize=10)
if add_text:
for ii, jj in product(range(n_classes), range(n_classes)):
val = confmat[i, ii, jj] if confmat.ndim == 3 else confmat[ii, jj]
ax.text(jj, ii, str(val.item()), ha="center", va="center", fontsize=15)
return fig, axs
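# Editor's sketch (not part of the upstream module): plotting a small multiclass
# confusion matrix; this requires `matplotlib` to be installed. The class labels
# are illustrative.
def _example_plot_confusion_matrix() -> None:
    """Render a 3x3 confusion matrix with text annotations and save it to disk."""
    confmat = torch.tensor([[3, 0, 1], [1, 4, 0], [0, 2, 5]])
    fig, ax = plot_confusion_matrix(confmat, labels=["cat", "dog", "bird"])
    fig.savefig("confusion_matrix.png")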
@style_change(_style)
def plot_curve(
curve: Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]],
score: Optional[Tensor] = None,
ax: Optional[_AX_TYPE] = None, # type: ignore[valid-type]
label_names: Optional[Tuple[str, str]] = None,
legend_name: Optional[str] = None,
name: Optional[str] = None,
) -> _PLOT_OUT_TYPE:
"""Inspired by: https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/metrics/_plot/roc_curve.py.
Plots a curve object
Args:
curve: a tuple of (x, y, t) where x and y are the coordinates of the curve and t are the thresholds used
to compute the curve
score: optional area under the curve added as label to the plot
ax: Axis from a figure
label_names: Tuple containing the names of the x and y axis
legend_name: Name of the curve to be used in the legend
name: Custom name to describe the metric
Returns:
A tuple consisting of the figure and respective ax objects (or array of ax objects) of the generated figure
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
ValueError:
If `curve` does not have 3 elements, being in the wrong format
"""
if len(curve) < 2:
raise ValueError("Expected 2 or 3 elements in curve but got {len(curve)}")
x, y = curve[:2]
_error_on_missing_matplotlib()
fig, ax = plt.subplots() if ax is None else (None, ax)
if isinstance(x, Tensor) and isinstance(y, Tensor) and x.ndim == 1 and y.ndim == 1:
label = f"AUC={score.item():0.3f}" if score is not None else None
ax.plot(x.detach().cpu(), y.detach().cpu(), linestyle="-", linewidth=2, label=label)
if label_names is not None:
ax.set_xlabel(label_names[0])
ax.set_ylabel(label_names[1])
if label is not None:
ax.legend()
elif (isinstance(x, list) and isinstance(y, list)) or (
isinstance(x, Tensor) and isinstance(y, Tensor) and x.ndim == 2 and y.ndim == 2
):
for i, (x_, y_) in enumerate(zip(x, y)):
label = f"{legend_name}_{i}" if legend_name is not None else str(i)
label += f" AUC={score[i].item():0.3f}" if score is not None else ""
ax.plot(x_.detach().cpu(), y_.detach().cpu(), label=label)
ax.legend()
else:
raise ValueError(
f"Unknown format for argument `x` and `y`. Expected either list or tensors but got {type(x)} and {type(y)}."
)
ax.grid(True)
ax.set_title(name)
return fig, ax
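# A minimal usage sketch for `plot_curve` (assuming matplotlib is installed; the tensor
# values below are chosen purely for illustration). The threshold tensor is carried in
# the curve tuple but is not used for drawing.
#
# >>> import torch
# >>> fpr = torch.linspace(0, 1, 11)
# >>> tpr = fpr.sqrt()
# >>> fig, ax = plot_curve(
# ...     (fpr, tpr, torch.linspace(1, 0, 11)),
# ...     score=torch.trapz(tpr, fpr),
# ...     label_names=("False positive rate", "True positive rate"),
# ...     name="ROC",
# ... )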
public_repos/torchmetrics/src/torchmetrics/utilities/compute.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor
def _safe_matmul(x: Tensor, y: Tensor) -> Tensor:
"""Safe calculation of matrix multiplication.
If input is float16, will cast to float32 for computation and back again.
"""
if x.dtype == torch.float16 or y.dtype == torch.float16:
return (x.float() @ y.T.float()).half()
return x @ y.T
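# A minimal usage sketch (shapes and dtypes chosen for illustration): float16 inputs are
# promoted to float32 for the matrix product and the result is cast back to half precision.
#
# >>> x = torch.randn(4, 8, dtype=torch.float16)
# >>> y = torch.randn(4, 8, dtype=torch.float16)
# >>> out = _safe_matmul(x, y)
# >>> out.shape, out.dtype
# (torch.Size([4, 4]), torch.float16)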
def _safe_xlogy(x: Tensor, y: Tensor) -> Tensor:
"""Compute x * log(y). Returns 0 if x=0.
Example:
>>> import torch
>>> x = torch.zeros(1)
>>> _safe_xlogy(x, 1/x)
tensor([0.])
"""
res = x * torch.log(y)
res[x == 0] = 0.0
return res
def _safe_divide(num: Tensor, denom: Tensor) -> Tensor:
"""Safe division, by preventing division by zero.
    Additionally casts to float if the input is not already floating point, to secure backwards compatibility.
"""
denom[denom == 0.0] = 1
num = num if num.is_floating_point() else num.float()
denom = denom if denom.is_floating_point() else denom.float()
return num / denom
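# A minimal usage sketch (values chosen for illustration): zero denominators are replaced
# by one, so those entries become the float numerator instead of NaN/inf. Note that
# `denom` is modified in place.
#
# >>> num, denom = torch.tensor([1, 2, 3]), torch.tensor([2, 0, 4])
# >>> _safe_divide(num, denom)
# tensor([0.5000, 2.0000, 0.7500])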
def _adjust_weights_safe_divide(
score: Tensor, average: Optional[str], multilabel: bool, tp: Tensor, fp: Tensor, fn: Tensor
) -> Tensor:
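    """Aggregate per-class scores into an overall score according to ``average``; empty classes are skipped unless ``multilabel`` is set."""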
if average is None or average == "none":
return score
if average == "weighted":
weights = tp + fn
else:
weights = torch.ones_like(score)
if not multilabel:
weights[tp + fp + fn == 0] = 0.0
return _safe_divide(weights * score, weights.sum(-1, keepdim=True)).sum(-1)
def _auc_format_inputs(x: Tensor, y: Tensor) -> Tuple[Tensor, Tensor]:
"""Check that auc input is correct."""
x = x.squeeze() if x.ndim > 1 else x
y = y.squeeze() if y.ndim > 1 else y
if x.ndim > 1 or y.ndim > 1:
raise ValueError(
f"Expected both `x` and `y` tensor to be 1d, but got tensors with dimension {x.ndim} and {y.ndim}"
)
if x.numel() != y.numel():
raise ValueError(
f"Expected the same number of elements in `x` and `y` tensor but received {x.numel()} and {y.numel()}"
)
return x, y
def _auc_compute_without_check(x: Tensor, y: Tensor, direction: float, axis: int = -1) -> Tensor:
"""Compute area under the curve using the trapezoidal rule.
Assumes increasing or decreasing order of `x`.
"""
with torch.no_grad():
auc_: Tensor = torch.trapz(y, x, dim=axis) * direction
return auc_
def _auc_compute(x: Tensor, y: Tensor, reorder: bool = False) -> Tensor:
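    """Compute area under the curve, optionally sorting ``x`` first and inferring the integration direction."""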
with torch.no_grad():
if reorder:
x, x_idx = torch.sort(x, stable=True)
y = y[x_idx]
dx = x[1:] - x[:-1]
if (dx < 0).any():
if (dx <= 0).all():
direction = -1.0
else:
raise ValueError(
"The `x` tensor is neither increasing or decreasing. Try setting the reorder argument to `True`."
)
else:
direction = 1.0
return _auc_compute_without_check(x, y, direction)
def auc(x: Tensor, y: Tensor, reorder: bool = False) -> Tensor:
"""Compute Area Under the Curve (AUC) using the trapezoidal rule.
Args:
x: x-coordinates, must be either increasing or decreasing
y: y-coordinates
reorder: if True, will reorder the arrays to make it either increasing or decreasing
Return:
Tensor containing AUC score
"""
x, y = _auc_format_inputs(x, y)
return _auc_compute(x, y, reorder=reorder)
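# A minimal usage sketch (values chosen for illustration): the area under y = x on [0, 1]
# computed with the trapezoidal rule.
#
# >>> x = torch.tensor([0.0, 0.5, 1.0])
# >>> y = torch.tensor([0.0, 0.5, 1.0])
# >>> auc(x, y)
# tensor(0.5000)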
def interp(x: Tensor, xp: Tensor, fp: Tensor) -> Tensor:
"""One-dimensional linear interpolation for monotonically increasing sample points.
Returns the one-dimensional piecewise linear interpolant to a function with
given discrete data points :math:`(xp, fp)`, evaluated at :math:`x`.
Adjusted version of this https://github.com/pytorch/pytorch/issues/50334#issuecomment-1000917964
Args:
x: the :math:`x`-coordinates at which to evaluate the interpolated values.
xp: the :math:`x`-coordinates of the data points, must be increasing.
fp: the :math:`y`-coordinates of the data points, same length as `xp`.
Returns:
the interpolated values, same size as `x`.
"""
m = _safe_divide(fp[1:] - fp[:-1], xp[1:] - xp[:-1])
b = fp[:-1] - (m * xp[:-1])
indices = torch.sum(torch.ge(x[:, None], xp[None, :]), 1) - 1
indices = torch.clamp(indices, 0, len(m) - 1)
return m[indices] * x + b[indices]
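# A minimal usage sketch (values chosen for illustration): within the range of ``xp`` this
# behaves like ``numpy.interp`` for increasing sample points.
#
# >>> xp = torch.tensor([0.0, 1.0, 2.0])
# >>> fp = torch.tensor([0.0, 10.0, 20.0])
# >>> interp(torch.tensor([0.5, 1.5]), xp, fp)
# tensor([ 5., 15.])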
public_repos/torchmetrics/src/torchmetrics/utilities/imports.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import utilities."""
import operator
import shutil
import sys
from typing import Optional
from lightning_utilities.core.imports import compare_version, package_available
from packaging.version import Version, parse
_PYTHON_VERSION = ".".join(map(str, [sys.version_info.major, sys.version_info.minor, sys.version_info.micro]))
_PYTHON_LOWER_3_8 = parse(_PYTHON_VERSION) < Version("3.8")
_TORCH_LOWER_2_0: Optional[bool] = compare_version("torch", operator.lt, "2.0.0")
_TORCH_GREATER_EQUAL_1_11: Optional[bool] = compare_version("torch", operator.ge, "1.11.0")
_TORCH_GREATER_EQUAL_1_12: Optional[bool] = compare_version("torch", operator.ge, "1.12.0")
_TORCH_GREATER_EQUAL_1_13: Optional[bool] = compare_version("torch", operator.ge, "1.13.0")
_TORCH_GREATER_EQUAL_2_0: Optional[bool] = compare_version("torch", operator.ge, "2.0.0")
_TORCH_GREATER_EQUAL_2_1: Optional[bool] = compare_version("torch", operator.ge, "2.1.0")
_JIWER_AVAILABLE: bool = package_available("jiwer")
_NLTK_AVAILABLE: bool = package_available("nltk")
_ROUGE_SCORE_AVAILABLE: bool = package_available("rouge_score")
_BERTSCORE_AVAILABLE: bool = package_available("bert_score")
_SCIPY_AVAILABLE: bool = package_available("scipy")
_SCIPY_GREATER_EQUAL_1_8 = compare_version("scipy", operator.ge, "1.8.0")
_TORCH_FIDELITY_AVAILABLE: bool = package_available("torch_fidelity")
_LPIPS_AVAILABLE: bool = package_available("lpips")
_PYCOCOTOOLS_AVAILABLE: bool = package_available("pycocotools")
_TORCHVISION_AVAILABLE: bool = package_available("torchvision")
_TORCHVISION_GREATER_EQUAL_0_8: Optional[bool] = compare_version("torchvision", operator.ge, "0.8.0")
_TORCHVISION_GREATER_EQUAL_0_13: Optional[bool] = compare_version("torchvision", operator.ge, "0.13.0")
_TQDM_AVAILABLE: bool = package_available("tqdm")
_TRANSFORMERS_AVAILABLE: bool = package_available("transformers")
_TRANSFORMERS_GREATER_EQUAL_4_4: Optional[bool] = compare_version("transformers", operator.ge, "4.4.0")
_TRANSFORMERS_GREATER_EQUAL_4_10: Optional[bool] = compare_version("transformers", operator.ge, "4.10.0")
_PESQ_AVAILABLE: bool = package_available("pesq")
_GAMMATONE_AVAILABLE: bool = package_available("gammatone")
_TORCHAUDIO_AVAILABLE: bool = package_available("torchaudio")
_TORCHAUDIO_GREATER_EQUAL_0_10: Optional[bool] = compare_version("torchaudio", operator.ge, "0.10.0")
_SACREBLEU_AVAILABLE: bool = package_available("sacrebleu")
_REGEX_AVAILABLE: bool = package_available("regex")
_PYSTOI_AVAILABLE: bool = package_available("pystoi")
_FAST_BSS_EVAL_AVAILABLE: bool = package_available("fast_bss_eval")
_MATPLOTLIB_AVAILABLE: bool = package_available("matplotlib")
_SCIENCEPLOT_AVAILABLE: bool = package_available("scienceplots")
_MULTIPROCESSING_AVAILABLE: bool = package_available("multiprocessing")
_XLA_AVAILABLE: bool = package_available("torch_xla")
_PIQ_GREATER_EQUAL_0_8: Optional[bool] = compare_version("piq", operator.ge, "0.8.0")
_FASTER_COCO_EVAL_AVAILABLE: bool = package_available("faster_coco_eval")
_MECAB_AVAILABLE: bool = package_available("MeCab")
_MECAB_KO_AVAILABLE: bool = package_available("mecab_ko")
_MECAB_KO_DIC_AVAILABLE: bool = package_available("mecab_ko_dic")
_IPADIC_AVAILABLE: bool = package_available("ipadic")
_SENTENCEPIECE_AVAILABLE: bool = package_available("sentencepiece")
_LATEX_AVAILABLE: bool = shutil.which("latex") is not None
public_repos/torchmetrics/src/torchmetrics/utilities/data.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import torch
from lightning_utilities import apply_to_collection
from torch import Tensor
from torchmetrics.utilities.exceptions import TorchMetricsUserWarning
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_12, _XLA_AVAILABLE
from torchmetrics.utilities.prints import rank_zero_warn
METRIC_EPS = 1e-6
def dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor:
"""Concatenation along the zero dimension."""
if isinstance(x, torch.Tensor):
return x
x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x]
if not x: # empty list
raise ValueError("No samples to concatenate")
return torch.cat(x, dim=0)
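# A minimal usage sketch (values chosen for illustration): a list of state tensors is
# concatenated along the first dimension; zero-dimensional entries are unsqueezed first.
#
# >>> dim_zero_cat([torch.tensor(1.0), torch.tensor(2.0)])
# tensor([1., 2.])
# >>> dim_zero_cat([torch.ones(2, 3), torch.ones(1, 3)]).shape
# torch.Size([3, 3])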
def dim_zero_sum(x: Tensor) -> Tensor:
"""Summation along the zero dimension."""
return torch.sum(x, dim=0)
def dim_zero_mean(x: Tensor) -> Tensor:
"""Average along the zero dimension."""
return torch.mean(x, dim=0)
def dim_zero_max(x: Tensor) -> Tensor:
"""Max along the zero dimension."""
return torch.max(x, dim=0).values
def dim_zero_min(x: Tensor) -> Tensor:
"""Min along the zero dimension."""
return torch.min(x, dim=0).values
def _flatten(x: Sequence) -> list:
"""Flatten list of list into single list."""
return [item for sublist in x for item in sublist]
def _flatten_dict(x: Dict) -> Tuple[Dict, bool]:
"""Flatten dict of dicts into single dict and checking for duplicates in keys along the way."""
new_dict = {}
duplicates = False
for key, value in x.items():
if isinstance(value, dict):
for k, v in value.items():
if k in new_dict:
duplicates = True
new_dict[k] = v
else:
if key in new_dict:
duplicates = True
new_dict[key] = value
return new_dict, duplicates
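# A minimal usage sketch (values chosen for illustration): nested dictionaries are merged
# into a single flat dictionary and the boolean flags whether any key was overwritten.
#
# >>> _flatten_dict({"a": 1, "b": {"c": 2}})
# ({'a': 1, 'c': 2}, False)
# >>> _flatten_dict({"a": 1, "b": {"a": 2}})
# ({'a': 2}, True)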
def to_onehot(
label_tensor: Tensor,
num_classes: Optional[int] = None,
) -> Tensor:
"""Convert a dense label tensor to one-hot format.
Args:
label_tensor: dense label tensor, with shape [N, d1, d2, ...]
num_classes: number of classes C
Returns:
A sparse label tensor with shape [N, C, d1, d2, ...]
Example:
>>> x = torch.tensor([1, 2, 3])
>>> to_onehot(x)
tensor([[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
"""
if num_classes is None:
num_classes = int(label_tensor.max().detach().item() + 1)
tensor_onehot = torch.zeros(
label_tensor.shape[0],
num_classes,
*label_tensor.shape[1:],
dtype=label_tensor.dtype,
device=label_tensor.device,
)
index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot)
return tensor_onehot.scatter_(1, index, 1.0)
def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:
"""Convert a probability tensor to binary by selecting top-k the highest entries.
Args:
prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the
position defined by the ``dim`` argument
topk: number of the highest entries to turn into 1s
dim: dimension on which to compare entries
Returns:
A binary tensor of the same shape as the input tensor of type ``torch.int32``
Example:
>>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])
>>> select_topk(x, topk=2)
tensor([[0, 1, 1],
[1, 1, 0]], dtype=torch.int32)
"""
zeros = torch.zeros_like(prob_tensor)
if topk == 1: # argmax has better performance than topk
topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)
else:
topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)
return topk_tensor.int()
def to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor:
"""Convert a tensor of probabilities to a dense label tensor.
Args:
x: probabilities to get the categorical label [N, d1, d2, ...]
argmax_dim: dimension to apply
Return:
A tensor with categorical labels [N, d2, ...]
Example:
>>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]])
>>> to_categorical(x)
tensor([1, 0])
"""
return torch.argmax(x, dim=argmax_dim)
def _squeeze_scalar_element_tensor(x: Tensor) -> Tensor:
return x.squeeze() if x.numel() == 1 else x
def _squeeze_if_scalar(data: Any) -> Any:
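    """Squeeze every single-element tensor inside a (possibly nested) collection to a zero-dimensional tensor."""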
return apply_to_collection(data, Tensor, _squeeze_scalar_element_tensor)
def _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:
"""Implement custom bincount.
PyTorch currently does not support ``torch.bincount`` for:
- deterministic mode on GPU.
- MPS devices
    This implementation falls back to a for-loop that counts occurrences in that case.
Args:
x: tensor to count
minlength: minimum length to count
Returns:
Number of occurrences for each unique element in x
Example:
>>> x = torch.tensor([0,0,0,1,1,2,2,2,2])
>>> _bincount(x, minlength=3)
tensor([3, 2, 4])
"""
if minlength is None:
minlength = len(torch.unique(x))
    if torch.are_deterministic_algorithms_enabled() or _XLA_AVAILABLE or (_TORCH_GREATER_EQUAL_1_12 and x.is_mps):
output = torch.zeros(minlength, device=x.device, dtype=torch.long)
for i in range(minlength):
output[i] = (x == i).sum()
return output
return torch.bincount(x, minlength=minlength)
def _cumsum(x: Tensor, dim: Optional[int] = 0, dtype: Optional[torch.dtype] = None) -> Tensor:
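    """Cumulative sum that falls back to a CPU computation when deterministic algorithms are enabled on CUDA."""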
if torch.are_deterministic_algorithms_enabled() and x.is_cuda and x.is_floating_point() and sys.platform != "win32":
rank_zero_warn(
"You are trying to use a metric in deterministic mode on GPU that uses `torch.cumsum`, which is currently "
"not supported. The tensor will be copied to the CPU memory to compute it and then copied back to GPU. "
"Expect some slowdowns.",
TorchMetricsUserWarning,
)
return x.cpu().cumsum(dim=dim, dtype=dtype).cuda()
return torch.cumsum(x, dim=dim, dtype=dtype)
def _flexible_bincount(x: Tensor) -> Tensor:
"""Similar to `_bincount`, but works also with tensor that do not contain continuous values.
Args:
x: tensor to count
Returns:
Number of occurrences for each unique element in x
"""
# make sure elements in x start from 0
x = x - x.min()
unique_x = torch.unique(x)
output = _bincount(x, minlength=torch.max(unique_x) + 1) # type: ignore[arg-type]
# remove zeros from output tensor
return output[unique_x]
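# A minimal usage sketch (values chosen for illustration): occurrences are counted even
# when the values are neither consecutive nor starting at zero.
#
# >>> _flexible_bincount(torch.tensor([10, 10, 12, 15, 15, 15]))
# tensor([2, 1, 3])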
def allclose(tensor1: Tensor, tensor2: Tensor) -> bool:
"""Wrap torch.allclose to be robust towards dtype difference."""
if tensor1.dtype != tensor2.dtype:
tensor2 = tensor2.to(dtype=tensor1.dtype)
return torch.allclose(tensor1, tensor2)
public_repos/torchmetrics/src/torchmetrics/utilities/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.utilities.checks import check_forward_full_state_property
from torchmetrics.utilities.data import (
dim_zero_cat,
dim_zero_max,
dim_zero_mean,
dim_zero_min,
dim_zero_sum,
)
from torchmetrics.utilities.distributed import class_reduce, reduce
from torchmetrics.utilities.prints import rank_zero_debug, rank_zero_info, rank_zero_warn
__all__ = [
"check_forward_full_state_property",
"class_reduce",
"reduce",
"rank_zero_debug",
"rank_zero_info",
"rank_zero_warn",
"dim_zero_cat",
"dim_zero_max",
"dim_zero_mean",
"dim_zero_min",
"dim_zero_sum",
]
public_repos/torchmetrics/src/torchmetrics/detection/giou.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.detection.iou import IntersectionOverUnion
from torchmetrics.functional.detection.giou import _giou_compute, _giou_update
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCHVISION_GREATER_EQUAL_0_8
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _TORCHVISION_GREATER_EQUAL_0_8:
__doctest_skip__ = ["GeneralizedIntersectionOverUnion", "GeneralizedIntersectionOverUnion.plot"]
elif not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["GeneralizedIntersectionOverUnion.plot"]
class GeneralizedIntersectionOverUnion(IntersectionOverUnion):
r"""Compute Generalized Intersection Over Union (`GIoU`_).
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes``
detection boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- ``labels`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0-indexed detection
classes for the boxes.
- ``target`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes`` ground
truth boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- ``labels`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0-indexed ground truth
classes for the boxes.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``giou_dict``: A dictionary containing the following key-values:
- giou: (:class:`~torch.Tensor`) with overall giou value over all classes and samples.
        - giou/cl_{cl}: (:class:`~torch.Tensor`), if argument ``class_metrics=True``
Args:
box_format:
Input format of given boxes. Supported formats are ``[`xyxy`, `xywh`, `cxcywh`]``.
        iou_threshold:
            Optional IoU threshold for evaluation. If set to `None` the threshold is ignored.
class_metrics:
Option to enable per-class metrics for IoU. Has a performance impact.
respect_labels:
            Ignore values from boxes that do not have the same label as the ground truth box. Else will compute IoU
between all pairs of boxes.
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.detection import GeneralizedIntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = GeneralizedIntersectionOverUnion()
>>> metric(preds, target)
{'giou': tensor(0.8613)}
Raises:
ModuleNotFoundError:
If torchvision is not installed with version 0.8.0 or newer.
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = True
_iou_type: str = "giou"
_invalid_val: float = -1.0
def __init__(
self,
box_format: str = "xyxy",
iou_threshold: Optional[float] = None,
class_metrics: bool = False,
respect_labels: bool = True,
**kwargs: Any,
) -> None:
super().__init__(box_format, iou_threshold, class_metrics, respect_labels, **kwargs)
@staticmethod
def _iou_update_fn(*args: Any, **kwargs: Any) -> Tensor:
return _giou_update(*args, **kwargs)
@staticmethod
def _iou_compute_fn(*args: Any, **kwargs: Any) -> Tensor:
return _giou_compute(*args, **kwargs)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting single value
>>> import torch
>>> from torchmetrics.detection import GeneralizedIntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = GeneralizedIntersectionOverUnion()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.detection import GeneralizedIntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = lambda : [
... {
... "boxes": torch.tensor([[300.00, 100.00, 335.00, 150.00]]) + torch.randint(-10, 10, (1, 4)),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = GeneralizedIntersectionOverUnion()
>>> vals = []
>>> for _ in range(20):
... vals.append(metric(preds, target()))
>>> fig_, ax_ = metric.plot(vals)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/detection/ciou.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.detection.iou import IntersectionOverUnion
from torchmetrics.functional.detection.ciou import _ciou_compute, _ciou_update
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCHVISION_GREATER_EQUAL_0_13
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _TORCHVISION_GREATER_EQUAL_0_13:
__doctest_skip__ = ["CompleteIntersectionOverUnion", "CompleteIntersectionOverUnion.plot"]
elif not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["CompleteIntersectionOverUnion.plot"]
class CompleteIntersectionOverUnion(IntersectionOverUnion):
r"""Computes Complete Intersection Over Union (`CIoU`_).
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes``
detection boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- ``labels`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0-indexed detection
classes for the boxes.
- ``target`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes`` ground
truth boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- ``labels`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0-indexed detection
classes for the boxes.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``ciou_dict``: A dictionary containing the following key-values:
- ciou: (:class:`~torch.Tensor`) with overall ciou value over all classes and samples.
- ciou/cl_{cl}: (:class:`~torch.Tensor`), if argument ``class_metrics=True``
Args:
box_format:
Input format of given boxes. Supported formats are ``[`xyxy`, `xywh`, `cxcywh`]``.
        iou_threshold:
            Optional IoU threshold for evaluation. If set to `None` the threshold is ignored.
class_metrics:
Option to enable per-class metrics for IoU. Has a performance impact.
respect_labels:
            Ignore values from boxes that do not have the same label as the ground truth box. Else will compute IoU
between all pairs of boxes.
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.detection import CompleteIntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = CompleteIntersectionOverUnion()
>>> metric(preds, target)
{'ciou': tensor(0.8611)}
Raises:
ModuleNotFoundError:
If torchvision is not installed with version 0.13.0 or newer.
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = True
_iou_type: str = "ciou"
_invalid_val: float = -2.0 # unsure, min val could be just -1.5 as well
def __init__(
self,
box_format: str = "xyxy",
iou_threshold: Optional[float] = None,
class_metrics: bool = False,
respect_labels: bool = True,
**kwargs: Any,
) -> None:
if not _TORCHVISION_GREATER_EQUAL_0_13:
raise ModuleNotFoundError(
f"Metric `{self._iou_type.upper()}` requires that `torchvision` version 0.13.0 or newer is installed."
" Please install with `pip install torchvision>=0.13` or `pip install torchmetrics[detection]`."
)
super().__init__(box_format, iou_threshold, class_metrics, respect_labels, **kwargs)
@staticmethod
def _iou_update_fn(*args: Any, **kwargs: Any) -> Tensor:
return _ciou_update(*args, **kwargs)
@staticmethod
def _iou_compute_fn(*args: Any, **kwargs: Any) -> Tensor:
return _ciou_compute(*args, **kwargs)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting single value
>>> import torch
>>> from torchmetrics.detection import CompleteIntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = CompleteIntersectionOverUnion()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.detection import CompleteIntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = lambda : [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]) + torch.randint(-10, 10, (1, 4)),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = CompleteIntersectionOverUnion()
>>> vals = []
>>> for _ in range(20):
... vals.append(metric(preds, target()))
>>> fig_, ax_ = metric.plot(vals)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/detection/helpers.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Literal, Sequence, Tuple, Union
from torch import Tensor
def _input_validator(
preds: Sequence[Dict[str, Tensor]],
targets: Sequence[Dict[str, Tensor]],
iou_type: Union[Literal["bbox", "segm"], Tuple[Literal["bbox", "segm"]]] = "bbox",
ignore_score: bool = False,
) -> None:
"""Ensure the correct input format of `preds` and `targets`."""
if isinstance(iou_type, str):
iou_type = (iou_type,)
name_map = {"bbox": "boxes", "segm": "masks"}
if any(tp not in name_map for tp in iou_type):
raise Exception(f"IOU type {iou_type} is not supported")
item_val_name = [name_map[tp] for tp in iou_type]
if not isinstance(preds, Sequence):
raise ValueError(f"Expected argument `preds` to be of type Sequence, but got {preds}")
if not isinstance(targets, Sequence):
raise ValueError(f"Expected argument `target` to be of type Sequence, but got {targets}")
if len(preds) != len(targets):
raise ValueError(
f"Expected argument `preds` and `target` to have the same length, but got {len(preds)} and {len(targets)}"
)
for k in [*item_val_name, "labels"] + (["scores"] if not ignore_score else []):
if any(k not in p for p in preds):
raise ValueError(f"Expected all dicts in `preds` to contain the `{k}` key")
for k in [*item_val_name, "labels"]:
if any(k not in p for p in targets):
raise ValueError(f"Expected all dicts in `target` to contain the `{k}` key")
for ivn in item_val_name:
if not all(isinstance(pred[ivn], Tensor) for pred in preds):
raise ValueError(f"Expected all {ivn} in `preds` to be of type Tensor")
if not ignore_score and not all(isinstance(pred["scores"], Tensor) for pred in preds):
raise ValueError("Expected all scores in `preds` to be of type Tensor")
if not all(isinstance(pred["labels"], Tensor) for pred in preds):
raise ValueError("Expected all labels in `preds` to be of type Tensor")
for ivn in item_val_name:
if not all(isinstance(target[ivn], Tensor) for target in targets):
raise ValueError(f"Expected all {ivn} in `target` to be of type Tensor")
if not all(isinstance(target["labels"], Tensor) for target in targets):
raise ValueError("Expected all labels in `target` to be of type Tensor")
for i, item in enumerate(targets):
for ivn in item_val_name:
if item[ivn].size(0) != item["labels"].size(0):
raise ValueError(
f"Input '{ivn}' and labels of sample {i} in targets have a"
f" different length (expected {item[ivn].size(0)} labels, got {item['labels'].size(0)})"
)
if ignore_score:
return
for i, item in enumerate(preds):
for ivn in item_val_name:
if not (item[ivn].size(0) == item["labels"].size(0) == item["scores"].size(0)):
raise ValueError(
f"Input '{ivn}', labels and scores of sample {i} in predictions have a"
f" different length (expected {item[ivn].size(0)} labels and scores,"
f" got {item['labels'].size(0)} labels and {item['scores'].size(0)})"
)
def _fix_empty_tensors(boxes: Tensor) -> Tensor:
"""Empty tensors can cause problems in DDP mode, this methods corrects them."""
if boxes.numel() == 0 and boxes.ndim == 1:
return boxes.unsqueeze(0)
return boxes
def _validate_iou_type_arg(iou_type: Union[Literal["bbox", "segm"], Tuple[str]] = "bbox") -> Tuple[str]:
"""Validate that iou type argument is correct."""
allowed_iou_types = ("segm", "bbox")
if isinstance(iou_type, str):
iou_type = (iou_type,)
if any(tp not in allowed_iou_types for tp in iou_type):
raise ValueError(
f"Expected argument `iou_type` to be one of {allowed_iou_types} or a list of, but got {iou_type}"
)
return iou_type
public_repos/torchmetrics/src/torchmetrics/detection/iou.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.detection.helpers import _fix_empty_tensors, _input_validator
from torchmetrics.functional.detection.iou import _iou_compute, _iou_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCHVISION_GREATER_EQUAL_0_8
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _TORCHVISION_GREATER_EQUAL_0_8:
__doctest_skip__ = ["IntersectionOverUnion", "IntersectionOverUnion.plot"]
elif not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["IntersectionOverUnion.plot"]
class IntersectionOverUnion(Metric):
r"""Computes Intersection Over Union (IoU).
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes``
detection boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- labels: ``IntTensor`` of shape ``(num_boxes)`` containing 0-indexed detection classes for
the boxes.
- ``target`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes`` ground
truth boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- ``labels`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0-indexed ground truth
classes for the boxes.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``iou_dict``: A dictionary containing the following key-values:
- iou: (:class:`~torch.Tensor`)
        - iou/cl_{cl}: (:class:`~torch.Tensor`), if argument ``class_metrics=True``
Args:
box_format:
Input format of given boxes. Supported formats are ``[`xyxy`, `xywh`, `cxcywh`]``.
        iou_threshold:
            Optional IoU threshold for evaluation. If set to `None` the threshold is ignored.
class_metrics:
Option to enable per-class metrics for IoU. Has a performance impact.
respect_labels:
            Ignore values from boxes that do not have the same label as the ground truth box. Else will compute IoU
between all pairs of boxes.
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.detection import IntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([
... [296.55, 93.96, 314.97, 152.79],
... [298.55, 98.96, 314.97, 151.79]]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = IntersectionOverUnion()
>>> metric(preds, target)
{'iou': tensor(0.8614)}
Example::
The metric can also return the score per class:
>>> import torch
>>> from torchmetrics.detection import IntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([
... [296.55, 93.96, 314.97, 152.79],
... [298.55, 98.96, 314.97, 151.79]]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = [
... {
... "boxes": torch.tensor([
... [300.00, 100.00, 315.00, 150.00],
... [300.00, 100.00, 315.00, 150.00]
... ]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> metric = IntersectionOverUnion(class_metrics=True)
>>> metric(preds, target)
{'iou': tensor(0.7756), 'iou/cl_4': tensor(0.6898), 'iou/cl_5': tensor(0.8614)}
Raises:
ModuleNotFoundError:
If torchvision is not installed with version 0.8.0 or newer.
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = True
groundtruth_labels: List[Tensor]
iou_matrix: List[Tensor]
_iou_type: str = "iou"
_invalid_val: float = -1.0
def __init__(
self,
box_format: str = "xyxy",
iou_threshold: Optional[float] = None,
class_metrics: bool = False,
respect_labels: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not _TORCHVISION_GREATER_EQUAL_0_8:
raise ModuleNotFoundError(
f"Metric `{self._iou_type.upper()}` requires that `torchvision` version 0.8.0 or newer is installed."
" Please install with `pip install torchvision>=0.8` or `pip install torchmetrics[detection]`."
)
allowed_box_formats = ("xyxy", "xywh", "cxcywh")
if box_format not in allowed_box_formats:
raise ValueError(f"Expected argument `box_format` to be one of {allowed_box_formats} but got {box_format}")
self.box_format = box_format
self.iou_threshold = iou_threshold
if not isinstance(class_metrics, bool):
raise ValueError("Expected argument `class_metrics` to be a boolean")
self.class_metrics = class_metrics
if not isinstance(respect_labels, bool):
raise ValueError("Expected argument `respect_labels` to be a boolean")
self.respect_labels = respect_labels
self.add_state("groundtruth_labels", default=[], dist_reduce_fx=None)
self.add_state("iou_matrix", default=[], dist_reduce_fx=None)
@staticmethod
def _iou_update_fn(*args: Any, **kwargs: Any) -> Tensor:
return _iou_update(*args, **kwargs)
@staticmethod
def _iou_compute_fn(*args: Any, **kwargs: Any) -> Tensor:
return _iou_compute(*args, **kwargs)
def update(self, preds: List[Dict[str, Tensor]], target: List[Dict[str, Tensor]]) -> None:
"""Update state with predictions and targets."""
_input_validator(preds, target, ignore_score=True)
for p, t in zip(preds, target):
det_boxes = self._get_safe_item_values(p["boxes"])
gt_boxes = self._get_safe_item_values(t["boxes"])
self.groundtruth_labels.append(t["labels"])
iou_matrix = self._iou_update_fn(det_boxes, gt_boxes, self.iou_threshold, self._invalid_val) # N x M
if self.respect_labels:
label_eq = p["labels"].unsqueeze(1) == t["labels"].unsqueeze(0) # N x M
iou_matrix[~label_eq] = self._invalid_val
self.iou_matrix.append(iou_matrix)
def _get_safe_item_values(self, boxes: Tensor) -> Tensor:
from torchvision.ops import box_convert
boxes = _fix_empty_tensors(boxes)
if boxes.numel() > 0:
boxes = box_convert(boxes, in_fmt=self.box_format, out_fmt="xyxy")
return boxes
def _get_gt_classes(self) -> List:
"""Returns a list of unique classes found in ground truth and detection data."""
if len(self.groundtruth_labels) > 0:
return torch.cat(self.groundtruth_labels).unique().tolist()
return []
def compute(self) -> dict:
"""Computes IoU based on inputs passed in to ``update`` previously."""
score = torch.cat([mat[mat != self._invalid_val] for mat in self.iou_matrix], 0).mean()
results: Dict[str, Tensor] = {f"{self._iou_type}": score}
if self.class_metrics:
gt_labels = dim_zero_cat(self.groundtruth_labels)
classes = gt_labels.unique().tolist() if len(gt_labels) > 0 else []
for cl in classes:
masked_iou, observed = torch.zeros_like(score), torch.zeros_like(score)
for mat, gt_lab in zip(self.iou_matrix, self.groundtruth_labels):
scores = mat[:, gt_lab == cl]
masked_iou += scores[scores != self._invalid_val].sum()
observed += scores[scores != self._invalid_val].numel()
results.update({f"{self._iou_type}/cl_{cl}": masked_iou / observed})
return results
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.detection import IntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = IntersectionOverUnion()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.detection import IntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = lambda : [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]) + torch.randint(-10, 10, (1, 4)),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = IntersectionOverUnion()
>>> vals = []
>>> for _ in range(20):
... vals.append(metric(preds, target()))
>>> fig_, ax_ = metric.plot(vals)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/detection/diou.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.detection.iou import IntersectionOverUnion
from torchmetrics.functional.detection.diou import _diou_compute, _diou_update
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCHVISION_GREATER_EQUAL_0_13
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _TORCHVISION_GREATER_EQUAL_0_13:
__doctest_skip__ = ["DistanceIntersectionOverUnion", "DistanceIntersectionOverUnion.plot"]
elif not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["DistanceIntersectionOverUnion.plot"]
class DistanceIntersectionOverUnion(IntersectionOverUnion):
r"""Computes Distance Intersection Over Union (`DIoU`_).
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes``
detection boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- ``labels`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0-indexed detection
classes for the boxes.
- ``target`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes`` ground
truth boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- ``labels`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0-indexed ground truth
classes for the boxes.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``diou_dict``: A dictionary containing the following key-values:
- diou: (:class:`~torch.Tensor`) with overall diou value over all classes and samples.
- diou/cl_{cl}: (:class:`~torch.Tensor`), if argument ``class_metrics=True``
Args:
box_format:
Input format of given boxes. Supported formats are ``['xyxy', 'xywh', 'cxcywh']``.
        iou_threshold:
            Optional IoU threshold for evaluation. If set to `None` the threshold is ignored.
class_metrics:
Option to enable per-class metrics for IoU. Has a performance impact.
respect_labels:
            Ignore values from boxes that do not have the same label as the ground truth box. Else will compute IoU
between all pairs of boxes.
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.detection import DistanceIntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = DistanceIntersectionOverUnion()
>>> metric(preds, target)
{'diou': tensor(0.8611)}
Raises:
ModuleNotFoundError:
If torchvision is not installed with version 0.13.0 or newer.
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = True
_iou_type: str = "diou"
_invalid_val: float = -1.0
def __init__(
self,
box_format: str = "xyxy",
iou_threshold: Optional[float] = None,
class_metrics: bool = False,
respect_labels: bool = True,
**kwargs: Any,
) -> None:
if not _TORCHVISION_GREATER_EQUAL_0_13:
raise ModuleNotFoundError(
f"Metric `{self._iou_type.upper()}` requires that `torchvision` version 0.13.0 or newer is installed."
" Please install with `pip install torchvision>=0.13` or `pip install torchmetrics[detection]`."
)
super().__init__(box_format, iou_threshold, class_metrics, respect_labels, **kwargs)
@staticmethod
def _iou_update_fn(*args: Any, **kwargs: Any) -> Tensor:
return _diou_update(*args, **kwargs)
@staticmethod
def _iou_compute_fn(*args: Any, **kwargs: Any) -> Tensor:
return _diou_compute(*args, **kwargs)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting single value
>>> import torch
>>> from torchmetrics.detection import DistanceIntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = DistanceIntersectionOverUnion()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.detection import DistanceIntersectionOverUnion
>>> preds = [
... {
... "boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
... "scores": torch.tensor([0.236, 0.56]),
... "labels": torch.tensor([4, 5]),
... }
... ]
>>> target = lambda : [
... {
... "boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]) + torch.randint(-10, 10, (1, 4)),
... "labels": torch.tensor([5]),
... }
... ]
>>> metric = DistanceIntersectionOverUnion()
>>> vals = []
>>> for _ in range(20):
... vals.append(metric(preds, target()))
>>> fig_, ax_ = metric.plot(vals)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/detection/_mean_ap.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.distributed as dist
from torch import IntTensor, Tensor
from torchmetrics.detection.helpers import _fix_empty_tensors, _input_validator
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import _cumsum
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PYCOCOTOOLS_AVAILABLE, _TORCHVISION_GREATER_EQUAL_0_8
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MeanAveragePrecision.plot"]
if not _TORCHVISION_GREATER_EQUAL_0_8 or not _PYCOCOTOOLS_AVAILABLE:
__doctest_skip__ = ["MeanAveragePrecision.plot", "MeanAveragePrecision"]
log = logging.getLogger(__name__)
def compute_area(inputs: List[Any], iou_type: str = "bbox") -> Tensor:
"""Compute area of input depending on the specified iou_type.
    For an empty input an empty :class:`~torch.Tensor` is returned.
"""
import pycocotools.mask as mask_utils
from torchvision.ops import box_area
if len(inputs) == 0:
return Tensor([])
if iou_type == "bbox":
return box_area(torch.stack(inputs))
if iou_type == "segm":
inputs = [{"size": i[0], "counts": i[1]} for i in inputs]
return torch.tensor(mask_utils.area(inputs).astype("float"))
raise Exception(f"IOU type {iou_type} is not supported")
def compute_iou(
det: List[Any],
gt: List[Any],
iou_type: str = "bbox",
) -> Tensor:
"""Compute IOU between detections and ground-truth using the specified iou_type."""
from torchvision.ops import box_iou
if iou_type == "bbox":
return box_iou(torch.stack(det), torch.stack(gt))
if iou_type == "segm":
return _segm_iou(det, gt)
raise Exception(f"IOU type {iou_type} is not supported")
class BaseMetricResults(dict):
"""Base metric class, that allows fields for pre-defined metrics."""
def __getattr__(self, key: str) -> Tensor:
"""Get a specific metric attribute."""
# Using this you get the correct error message, an AttributeError instead of a KeyError
if key in self:
return self[key]
raise AttributeError(f"No such attribute: {key}")
def __setattr__(self, key: str, value: Tensor) -> None:
"""Set a specific metric attribute."""
self[key] = value
def __delattr__(self, key: str) -> None:
"""Delete a specific metric attribute."""
        if key in self:
            del self[key]
            return
        raise AttributeError(f"No such attribute: {key}")
class MAPMetricResults(BaseMetricResults):
"""Class to wrap the final mAP results."""
__slots__ = ("map", "map_50", "map_75", "map_small", "map_medium", "map_large", "classes")
class MARMetricResults(BaseMetricResults):
"""Class to wrap the final mAR results."""
__slots__ = ("mar_1", "mar_10", "mar_100", "mar_small", "mar_medium", "mar_large")
class COCOMetricResults(BaseMetricResults):
"""Class to wrap the final COCO metric results including various mAP/mAR values."""
__slots__ = (
"map",
"map_50",
"map_75",
"map_small",
"map_medium",
"map_large",
"mar_1",
"mar_10",
"mar_100",
"mar_small",
"mar_medium",
"mar_large",
"map_per_class",
"mar_100_per_class",
)
def _segm_iou(det: List[Tuple[np.ndarray, np.ndarray]], gt: List[Tuple[np.ndarray, np.ndarray]]) -> Tensor:
"""Compute IOU between detections and ground-truths using mask-IOU.
Implementation is based on pycocotools toolkit for mask_utils.
Args:
det: A list of detection masks as ``[(RLE_SIZE, RLE_COUNTS)]``, where ``RLE_SIZE`` is (width, height) dimension
of the input and RLE_COUNTS is its RLE representation;
gt: A list of ground-truth masks as ``[(RLE_SIZE, RLE_COUNTS)]``, where ``RLE_SIZE`` is (width, height) dimension
of the input and RLE_COUNTS is its RLE representation;
"""
import pycocotools.mask as mask_utils
det_coco_format = [{"size": i[0], "counts": i[1]} for i in det]
gt_coco_format = [{"size": i[0], "counts": i[1]} for i in gt]
return torch.tensor(mask_utils.iou(det_coco_format, gt_coco_format, [False for _ in gt]))
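# A minimal usage sketch (assuming ``pycocotools`` and ``numpy`` are available; the masks
# below are chosen purely for illustration): masks are RLE-encoded first and then passed
# as ``(size, counts)`` tuples.
#
# >>> import numpy as np
# >>> import pycocotools.mask as mask_utils
# >>> mask_a = np.zeros((10, 10), dtype=np.uint8, order="F")
# >>> mask_a[:5, :5] = 1
# >>> mask_b = np.zeros((10, 10), dtype=np.uint8, order="F")
# >>> mask_b[:5, :] = 1
# >>> rle_a, rle_b = mask_utils.encode(mask_a), mask_utils.encode(mask_b)
# >>> iou = _segm_iou([(rle_a["size"], rle_a["counts"])], [(rle_b["size"], rle_b["counts"])])
# >>> float(iou)
# 0.5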
class MeanAveragePrecision(Metric):
r"""Compute the `Mean-Average-Precision (mAP) and Mean-Average-Recall (mAR)`_ for object detection predictions.
.. math::
\text{mAP} = \frac{1}{n} \sum_{i=1}^{n} AP_i
where :math:`AP_i` is the average precision for class :math:`i` and :math:`n` is the number of classes. The average
precision is defined as the area under the precision-recall curve. If argument `class_metrics` is set to ``True``,
the metric will also return the mAP/mAR per class.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict
- boxes: (:class:`~torch.FloatTensor`) of shape ``(num_boxes, 4)`` containing ``num_boxes`` detection
boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- scores: :class:`~torch.FloatTensor` of shape ``(num_boxes)`` containing detection scores for the boxes.
- labels: :class:`~torch.IntTensor` of shape ``(num_boxes)`` containing 0-indexed detection classes for
the boxes.
- masks: :class:`~torch.bool` of shape ``(num_boxes, image_height, image_width)`` containing boolean masks.
Only required when `iou_type="segm"`.
- ``target`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- boxes: :class:`~torch.FloatTensor` of shape ``(num_boxes, 4)`` containing ``num_boxes`` ground truth
boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- labels: :class:`~torch.IntTensor` of shape ``(num_boxes)`` containing 0-indexed ground truth
classes for the boxes.
- masks: :class:`~torch.bool` of shape ``(num_boxes, image_height, image_width)`` containing boolean masks.
Only required when `iou_type="segm"`.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``map_dict``: A dictionary containing the following key-values:
- map: (:class:`~torch.Tensor`)
- map_small: (:class:`~torch.Tensor`)
- map_medium: (:class:`~torch.Tensor`)
- map_large: (:class:`~torch.Tensor`)
- mar_1: (:class:`~torch.Tensor`)
- mar_10: (:class:`~torch.Tensor`)
- mar_100: (:class:`~torch.Tensor`)
- mar_small: (:class:`~torch.Tensor`)
- mar_medium: (:class:`~torch.Tensor`)
- mar_large: (:class:`~torch.Tensor`)
- map_50: (:class:`~torch.Tensor`) (-1 if 0.5 not in the list of iou thresholds)
- map_75: (:class:`~torch.Tensor`) (-1 if 0.75 not in the list of iou thresholds)
- map_per_class: (:class:`~torch.Tensor`) (-1 if class metrics are disabled)
- mar_100_per_class: (:class:`~torch.Tensor`) (-1 if class metrics are disabled)
- classes (:class:`~torch.Tensor`)
For an example on how to use this metric check the `torchmetrics mAP example`_.
.. note::
``map`` score is calculated with @[ IoU=self.iou_thresholds | area=all | max_dets=max_detection_thresholds ].
Caution: If the initialization parameters are changed, dictionary keys for mAR can change as well.
The default properties are also accessible via fields and will raise an ``AttributeError`` if not available.
.. note::
This metric follows the mAP implementation of `pycocotools`_, a standard implementation of the mAP metric
for object detection.
.. note::
This metric requires you to have `torchvision` version 0.8.0 or newer installed
(with corresponding version 1.7.0 of torch or newer). This metric also requires `pycocotools` to be
installed when `iou_type` is `segm`. Please install with ``pip install torchvision`` or
``pip install torchmetrics[detection]``.
Args:
box_format:
Input format of given boxes. Supported formats are ``[`xyxy`, `xywh`, `cxcywh`]``.
iou_type:
Type of input (either masks or bounding-boxes) used for computing IOU.
Supported IOU types are ``["bbox", "segm"]``.
If using ``"segm"``, masks should be provided (see :meth:`update`).
iou_thresholds:
IoU thresholds for evaluation. If set to ``None`` it corresponds to the stepped range ``[0.5,...,0.95]``
with step ``0.05``. Else provide a list of floats.
rec_thresholds:
Recall thresholds for evaluation. If set to ``None`` it corresponds to the stepped range ``[0,...,1]``
with step ``0.01``. Else provide a list of floats.
max_detection_thresholds:
Thresholds on max detections per image. If set to `None` will use thresholds ``[1, 10, 100]``.
Else, please provide a list of ints.
class_metrics:
Option to enable per-class metrics for mAP and mAR_100. Has a performance impact.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ModuleNotFoundError:
If ``torchvision`` is not installed or version installed is lower than 0.8.0
ModuleNotFoundError:
If ``iou_type`` is equal to ``segm`` and ``pycocotools`` is not installed
ValueError:
If ``class_metrics`` is not a boolean
ValueError:
If ``preds`` is not of type (:class:`~List[Dict[str, Tensor]]`)
ValueError:
If ``target`` is not of type ``List[Dict[str, Tensor]]``
ValueError:
If ``preds`` and ``target`` are not of the same length
ValueError:
If any of ``preds.boxes``, ``preds.scores`` and ``preds.labels`` are not of the same length
ValueError:
If any of ``target.boxes`` and ``target.labels`` are not of the same length
ValueError:
If any box is not type float and of length 4
ValueError:
If any class is not type int and of length 1
ValueError:
If any score is not type float and of length 1
Example:
>>> from torch import tensor
>>> from torchmetrics.detection import MeanAveragePrecision
>>> preds = [
... dict(
... boxes=tensor([[258.0, 41.0, 606.0, 285.0]]),
... scores=tensor([0.536]),
... labels=tensor([0]),
... )
... ]
>>> target = [
... dict(
... boxes=tensor([[214.0, 41.0, 562.0, 285.0]]),
... labels=tensor([0]),
... )
... ]
>>> metric = MeanAveragePrecision()
>>> metric.update(preds, target)
>>> from pprint import pprint
>>> pprint(metric.compute())
{'classes': tensor(0, dtype=torch.int32),
'map': tensor(0.6000),
'map_50': tensor(1.),
'map_75': tensor(1.),
'map_large': tensor(0.6000),
'map_medium': tensor(-1.),
'map_per_class': tensor(-1.),
'map_small': tensor(-1.),
'mar_1': tensor(0.6000),
'mar_10': tensor(0.6000),
'mar_100': tensor(0.6000),
'mar_100_per_class': tensor(-1.),
'mar_large': tensor(0.6000),
'mar_medium': tensor(-1.),
'mar_small': tensor(-1.)}
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
detections: List[Tensor]
detection_scores: List[Tensor]
detection_labels: List[Tensor]
groundtruths: List[Tensor]
groundtruth_labels: List[Tensor]
def __init__(
self,
box_format: str = "xyxy",
iou_type: str = "bbox",
iou_thresholds: Optional[List[float]] = None,
rec_thresholds: Optional[List[float]] = None,
max_detection_thresholds: Optional[List[int]] = None,
class_metrics: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not _PYCOCOTOOLS_AVAILABLE:
raise ModuleNotFoundError(
"`MAP` metric requires that `pycocotools` is installed."
" Please install with `pip install pycocotools` or `pip install torchmetrics[detection]`"
)
if not _TORCHVISION_GREATER_EQUAL_0_8:
raise ModuleNotFoundError(
"`MeanAveragePrecision` metric requires that `torchvision` version 0.8.0 or newer is installed."
" Please install with `pip install torchvision>=0.8` or `pip install torchmetrics[detection]`."
)
allowed_box_formats = ("xyxy", "xywh", "cxcywh")
allowed_iou_types = ("segm", "bbox")
if box_format not in allowed_box_formats:
raise ValueError(f"Expected argument `box_format` to be one of {allowed_box_formats} but got {box_format}")
self.box_format = box_format
self.iou_thresholds = iou_thresholds or torch.linspace(0.5, 0.95, round((0.95 - 0.5) / 0.05) + 1).tolist()
self.rec_thresholds = rec_thresholds or torch.linspace(0.0, 1.00, round(1.00 / 0.01) + 1).tolist()
max_det_thr, _ = torch.sort(IntTensor(max_detection_thresholds or [1, 10, 100]))
self.max_detection_thresholds = max_det_thr.tolist()
if iou_type not in allowed_iou_types:
raise ValueError(f"Expected argument `iou_type` to be one of {allowed_iou_types} but got {iou_type}")
if iou_type == "segm" and not _PYCOCOTOOLS_AVAILABLE:
raise ModuleNotFoundError("When `iou_type` is set to 'segm', pycocotools needs to be installed")
self.iou_type = iou_type
self.bbox_area_ranges = {
"all": (float(0**2), float(1e5**2)),
"small": (float(0**2), float(32**2)),
"medium": (float(32**2), float(96**2)),
"large": (float(96**2), float(1e5**2)),
}
if not isinstance(class_metrics, bool):
raise ValueError("Expected argument `class_metrics` to be a boolean")
self.class_metrics = class_metrics
self.add_state("detections", default=[], dist_reduce_fx=None)
self.add_state("detection_scores", default=[], dist_reduce_fx=None)
self.add_state("detection_labels", default=[], dist_reduce_fx=None)
self.add_state("groundtruths", default=[], dist_reduce_fx=None)
self.add_state("groundtruth_labels", default=[], dist_reduce_fx=None)
def update(self, preds: List[Dict[str, Tensor]], target: List[Dict[str, Tensor]]) -> None:
"""Update state with predictions and targets."""
_input_validator(preds, target, iou_type=self.iou_type)
for item in preds:
detections = self._get_safe_item_values(item)
self.detections.append(detections)
self.detection_labels.append(item["labels"])
self.detection_scores.append(item["scores"])
for item in target:
groundtruths = self._get_safe_item_values(item)
self.groundtruths.append(groundtruths)
self.groundtruth_labels.append(item["labels"])
def _move_list_states_to_cpu(self) -> None:
"""Move list states to cpu to save GPU memory."""
for key in self._defaults:
current_val = getattr(self, key)
current_to_cpu = []
if isinstance(current_val, Sequence):
for cur_v in current_val:
# Cannot handle RLE as Tensor
if not isinstance(cur_v, tuple):
cur_v = cur_v.to("cpu")
current_to_cpu.append(cur_v)
setattr(self, key, current_to_cpu)
def _get_safe_item_values(self, item: Dict[str, Any]) -> Union[Tensor, Tuple]:
import pycocotools.mask as mask_utils
from torchvision.ops import box_convert
if self.iou_type == "bbox":
boxes = _fix_empty_tensors(item["boxes"])
if boxes.numel() > 0:
boxes = box_convert(boxes, in_fmt=self.box_format, out_fmt="xyxy")
return boxes
if self.iou_type == "segm":
masks = []
for i in item["masks"].cpu().numpy():
rle = mask_utils.encode(np.asfortranarray(i))
masks.append((tuple(rle["size"]), rle["counts"]))
return tuple(masks)
raise Exception(f"IOU type {self.iou_type} is not supported")
def _get_classes(self) -> List:
"""Return a list of unique classes found in ground truth and detection data."""
if len(self.detection_labels) > 0 or len(self.groundtruth_labels) > 0:
return torch.cat(self.detection_labels + self.groundtruth_labels).unique().tolist()
return []
def _compute_iou(self, idx: int, class_id: int, max_det: int) -> Tensor:
"""Compute the Intersection over Union (IoU) between bounding boxes for the given image and class.
Args:
idx:
Image Id, equivalent to the index of supplied samples
class_id:
Class Id of the supplied ground truth and detection labels
max_det:
Maximum number of evaluated detection bounding boxes
"""
# if self.iou_type == "bbox":
gt = self.groundtruths[idx]
det = self.detections[idx]
gt_label_mask = (self.groundtruth_labels[idx] == class_id).nonzero().squeeze(1)
det_label_mask = (self.detection_labels[idx] == class_id).nonzero().squeeze(1)
if len(gt_label_mask) == 0 or len(det_label_mask) == 0:
return Tensor([])
gt = [gt[i] for i in gt_label_mask]
det = [det[i] for i in det_label_mask]
if len(gt) == 0 or len(det) == 0:
return Tensor([])
# Sort by scores and use only max detections
scores = self.detection_scores[idx]
scores_filtered = scores[self.detection_labels[idx] == class_id]
inds = torch.argsort(scores_filtered, descending=True)
# TODO: fix (only necessary for masks)
det = [det[i] for i in inds]
if len(det) > max_det:
det = det[:max_det]
return compute_iou(det, gt, self.iou_type).to(self.device)
def __evaluate_image_gt_no_preds(
self, gt: Tensor, gt_label_mask: Tensor, area_range: Tuple[int, int], num_iou_thrs: int
) -> Dict[str, Any]:
"""Evaluate images with a ground truth but no predictions."""
# GTs
gt = [gt[i] for i in gt_label_mask]
num_gt = len(gt)
areas = compute_area(gt, iou_type=self.iou_type).to(self.device)
ignore_area = (areas < area_range[0]) | (areas > area_range[1])
gt_ignore, _ = torch.sort(ignore_area.to(torch.uint8))
gt_ignore = gt_ignore.to(torch.bool)
# Detections
num_det = 0
det_ignore = torch.zeros((num_iou_thrs, num_det), dtype=torch.bool, device=self.device)
return {
"dtMatches": torch.zeros((num_iou_thrs, num_det), dtype=torch.bool, device=self.device),
"gtMatches": torch.zeros((num_iou_thrs, num_gt), dtype=torch.bool, device=self.device),
"dtScores": torch.zeros(num_det, dtype=torch.float32, device=self.device),
"gtIgnore": gt_ignore,
"dtIgnore": det_ignore,
}
def __evaluate_image_preds_no_gt(
self,
det: Tensor,
idx: int,
det_label_mask: Tensor,
max_det: int,
area_range: Tuple[int, int],
num_iou_thrs: int,
) -> Dict[str, Any]:
"""Evaluate images with a prediction but no ground truth."""
# GTs
num_gt = 0
gt_ignore = torch.zeros(num_gt, dtype=torch.bool, device=self.device)
# Detections
det = [det[i] for i in det_label_mask]
scores = self.detection_scores[idx]
scores_filtered = scores[det_label_mask]
scores_sorted, dtind = torch.sort(scores_filtered, descending=True)
det = [det[i] for i in dtind]
if len(det) > max_det:
det = det[:max_det]
num_det = len(det)
det_areas = compute_area(det, iou_type=self.iou_type).to(self.device)
det_ignore_area = (det_areas < area_range[0]) | (det_areas > area_range[1])
ar = det_ignore_area.reshape((1, num_det))
det_ignore = torch.repeat_interleave(ar, num_iou_thrs, 0)
return {
"dtMatches": torch.zeros((num_iou_thrs, num_det), dtype=torch.bool, device=self.device),
"gtMatches": torch.zeros((num_iou_thrs, num_gt), dtype=torch.bool, device=self.device),
"dtScores": scores_sorted.to(self.device),
"gtIgnore": gt_ignore.to(self.device),
"dtIgnore": det_ignore.to(self.device),
}
def _evaluate_image(
self, idx: int, class_id: int, area_range: Tuple[int, int], max_det: int, ious: dict
) -> Optional[dict]:
"""Perform evaluation for single class and image.
Args:
idx:
Image Id, equivalent to the index of supplied samples.
class_id:
Class Id of the supplied ground truth and detection labels.
area_range:
List of lower and upper bounding box area threshold.
max_det:
Maximum number of evaluated detection bounding boxes.
ious:
IoU results for image and class.
"""
gt = self.groundtruths[idx]
det = self.detections[idx]
gt_label_mask = (self.groundtruth_labels[idx] == class_id).nonzero().squeeze(1)
det_label_mask = (self.detection_labels[idx] == class_id).nonzero().squeeze(1)
# No Gt and No predictions --> ignore image
if len(gt_label_mask) == 0 and len(det_label_mask) == 0:
return None
num_iou_thrs = len(self.iou_thresholds)
# Some GT but no predictions
if len(gt_label_mask) > 0 and len(det_label_mask) == 0:
return self.__evaluate_image_gt_no_preds(gt, gt_label_mask, area_range, num_iou_thrs)
# Some predictions but no GT
if len(gt_label_mask) == 0 and len(det_label_mask) > 0:
return self.__evaluate_image_preds_no_gt(det, idx, det_label_mask, max_det, area_range, num_iou_thrs)
gt = [gt[i] for i in gt_label_mask]
det = [det[i] for i in det_label_mask]
if len(gt) == 0 and len(det) == 0:
return None
if isinstance(det, dict):
det = [det]
if isinstance(gt, dict):
gt = [gt]
areas = compute_area(gt, iou_type=self.iou_type).to(self.device)
ignore_area = torch.logical_or(areas < area_range[0], areas > area_range[1])
# sort dt highest score first, sort gt ignore last
ignore_area_sorted, gtind = torch.sort(ignore_area.to(torch.uint8))
# Convert to uint8 temporarily and back to bool, because "Sort currently does not support bool dtype on CUDA"
ignore_area_sorted = ignore_area_sorted.to(torch.bool).to(self.device)
gt = [gt[i] for i in gtind]
scores = self.detection_scores[idx]
scores_filtered = scores[det_label_mask]
scores_sorted, dtind = torch.sort(scores_filtered, descending=True)
det = [det[i] for i in dtind]
if len(det) > max_det:
det = det[:max_det]
# load computed ious
ious = ious[idx, class_id][:, gtind] if len(ious[idx, class_id]) > 0 else ious[idx, class_id]
num_iou_thrs = len(self.iou_thresholds)
num_gt = len(gt)
num_det = len(det)
gt_matches = torch.zeros((num_iou_thrs, num_gt), dtype=torch.bool, device=self.device)
det_matches = torch.zeros((num_iou_thrs, num_det), dtype=torch.bool, device=self.device)
gt_ignore = ignore_area_sorted
det_ignore = torch.zeros((num_iou_thrs, num_det), dtype=torch.bool, device=self.device)
if torch.numel(ious) > 0:
for idx_iou, t in enumerate(self.iou_thresholds):
for idx_det, _ in enumerate(det):
m = MeanAveragePrecision._find_best_gt_match(t, gt_matches, idx_iou, gt_ignore, ious, idx_det)
if m == -1:
continue
det_ignore[idx_iou, idx_det] = gt_ignore[m]
det_matches[idx_iou, idx_det] = 1
gt_matches[idx_iou, m] = 1
# set unmatched detections outside of area range to ignore
det_areas = compute_area(det, iou_type=self.iou_type).to(self.device)
det_ignore_area = (det_areas < area_range[0]) | (det_areas > area_range[1])
ar = det_ignore_area.reshape((1, num_det))
det_ignore = torch.logical_or(
det_ignore, torch.logical_and(det_matches == 0, torch.repeat_interleave(ar, num_iou_thrs, 0))
)
return {
"dtMatches": det_matches.to(self.device),
"gtMatches": gt_matches.to(self.device),
"dtScores": scores_sorted.to(self.device),
"gtIgnore": gt_ignore.to(self.device),
"dtIgnore": det_ignore.to(self.device),
}
@staticmethod
def _find_best_gt_match(
thr: float, gt_matches: Tensor, idx_iou: int, gt_ignore: Tensor, ious: Tensor, idx_det: int
) -> int:
"""Return id of best ground truth match with current detection.
Args:
thr:
Current threshold value.
gt_matches:
Tensor showing if a ground truth matches for threshold ``t`` exists.
idx_iou:
Id of threshold ``t``.
gt_ignore:
Tensor showing if ground truth should be ignored.
ious:
IoUs for all combinations of detection and ground truth.
idx_det:
Id of current detection.
"""
previously_matched = gt_matches[idx_iou]
# Remove previously matched or ignored gts
remove_mask = previously_matched | gt_ignore
gt_ious = ious[idx_det] * ~remove_mask
match_idx = gt_ious.argmax().item()
if gt_ious[match_idx] > thr:
return match_idx
return -1
def _summarize(
self,
results: Dict,
avg_prec: bool = True,
iou_threshold: Optional[float] = None,
area_range: str = "all",
max_dets: int = 100,
) -> Tensor:
"""Summarize the precision/recall values into a single scalar for a given IoU/area/max-detections setting.
Args:
results:
Dictionary including precision, recall and scores for all combinations.
avg_prec:
Calculate average precision. Else calculate average recall.
iou_threshold:
IoU threshold. If set to ``None`` all values are used. Else results are filtered.
area_range:
Bounding box area range key.
max_dets:
Maximum detections.
"""
area_inds = [i for i, k in enumerate(self.bbox_area_ranges.keys()) if k == area_range]
mdet_inds = [i for i, k in enumerate(self.max_detection_thresholds) if k == max_dets]
if avg_prec:
# dimension of precision: [TxRxKxAxM]
prec = results["precision"]
# IoU
if iou_threshold is not None:
thr = self.iou_thresholds.index(iou_threshold)
prec = prec[thr, :, :, area_inds, mdet_inds]
else:
prec = prec[:, :, :, area_inds, mdet_inds]
else:
# dimension of recall: [TxKxAxM]
prec = results["recall"]
if iou_threshold is not None:
thr = self.iou_thresholds.index(iou_threshold)
prec = prec[thr, :, :, area_inds, mdet_inds]
else:
prec = prec[:, :, area_inds, mdet_inds]
return torch.tensor([-1.0]) if len(prec[prec > -1]) == 0 else torch.mean(prec[prec > -1])
def _calculate(self, class_ids: List) -> Tuple[Tensor, Tensor]:
"""Calculate the precision and recall for all supplied classes, which are later summarized into mAP/mAR.
Args:
class_ids:
List of label class Ids.
"""
img_ids = range(len(self.groundtruths))
max_detections = self.max_detection_thresholds[-1]
area_ranges = self.bbox_area_ranges.values()
ious = {
(idx, class_id): self._compute_iou(idx, class_id, max_detections)
for idx in img_ids
for class_id in class_ids
}
eval_imgs = [
self._evaluate_image(img_id, class_id, area, max_detections, ious)
for class_id in class_ids
for area in area_ranges
for img_id in img_ids
]
num_iou_thrs = len(self.iou_thresholds)
num_rec_thrs = len(self.rec_thresholds)
num_classes = len(class_ids)
num_bbox_areas = len(self.bbox_area_ranges)
num_max_det_thrs = len(self.max_detection_thresholds)
num_imgs = len(img_ids)
precision = -torch.ones((num_iou_thrs, num_rec_thrs, num_classes, num_bbox_areas, num_max_det_thrs))
recall = -torch.ones((num_iou_thrs, num_classes, num_bbox_areas, num_max_det_thrs))
scores = -torch.ones((num_iou_thrs, num_rec_thrs, num_classes, num_bbox_areas, num_max_det_thrs))
# move tensors if necessary
rec_thresholds_tensor = torch.tensor(self.rec_thresholds)
# retrieve the per-image evaluation results for each category, area range, and max number of detections
for idx_cls, _ in enumerate(class_ids):
for idx_bbox_area, _ in enumerate(self.bbox_area_ranges):
for idx_max_det_thrs, max_det in enumerate(self.max_detection_thresholds):
recall, precision, scores = MeanAveragePrecision.__calculate_recall_precision_scores(
recall,
precision,
scores,
idx_cls=idx_cls,
idx_bbox_area=idx_bbox_area,
idx_max_det_thrs=idx_max_det_thrs,
eval_imgs=eval_imgs,
rec_thresholds=rec_thresholds_tensor,
max_det=max_det,
num_imgs=num_imgs,
num_bbox_areas=num_bbox_areas,
)
return precision, recall
def _summarize_results(self, precisions: Tensor, recalls: Tensor) -> Tuple[MAPMetricResults, MARMetricResults]:
"""Summarizes the precision and recall values to calculate mAP/mAR.
Args:
precisions:
Precision values for different thresholds
recalls:
Recall values for different thresholds
"""
results = {"precision": precisions, "recall": recalls}
map_metrics = MAPMetricResults()
last_max_det_thr = self.max_detection_thresholds[-1]
map_metrics.map = self._summarize(results, True, max_dets=last_max_det_thr)
if 0.5 in self.iou_thresholds:
map_metrics.map_50 = self._summarize(results, True, iou_threshold=0.5, max_dets=last_max_det_thr)
else:
map_metrics.map_50 = torch.tensor([-1])
if 0.75 in self.iou_thresholds:
map_metrics.map_75 = self._summarize(results, True, iou_threshold=0.75, max_dets=last_max_det_thr)
else:
map_metrics.map_75 = torch.tensor([-1])
map_metrics.map_small = self._summarize(results, True, area_range="small", max_dets=last_max_det_thr)
map_metrics.map_medium = self._summarize(results, True, area_range="medium", max_dets=last_max_det_thr)
map_metrics.map_large = self._summarize(results, True, area_range="large", max_dets=last_max_det_thr)
mar_metrics = MARMetricResults()
for max_det in self.max_detection_thresholds:
mar_metrics[f"mar_{max_det}"] = self._summarize(results, False, max_dets=max_det)
mar_metrics.mar_small = self._summarize(results, False, area_range="small", max_dets=last_max_det_thr)
mar_metrics.mar_medium = self._summarize(results, False, area_range="medium", max_dets=last_max_det_thr)
mar_metrics.mar_large = self._summarize(results, False, area_range="large", max_dets=last_max_det_thr)
return map_metrics, mar_metrics
@staticmethod
def __calculate_recall_precision_scores(
recall: Tensor,
precision: Tensor,
scores: Tensor,
idx_cls: int,
idx_bbox_area: int,
idx_max_det_thrs: int,
eval_imgs: list,
rec_thresholds: Tensor,
max_det: int,
num_imgs: int,
num_bbox_areas: int,
) -> Tuple[Tensor, Tensor, Tensor]:
num_rec_thrs = len(rec_thresholds)
idx_cls_pointer = idx_cls * num_bbox_areas * num_imgs
idx_bbox_area_pointer = idx_bbox_area * num_imgs
# Load all image evals for current class_id and area_range
img_eval_cls_bbox = [eval_imgs[idx_cls_pointer + idx_bbox_area_pointer + i] for i in range(num_imgs)]
img_eval_cls_bbox = [e for e in img_eval_cls_bbox if e is not None]
if not img_eval_cls_bbox:
return recall, precision, scores
det_scores = torch.cat([e["dtScores"][:max_det] for e in img_eval_cls_bbox])
# Different sorting methods generate slightly different results;
# mergesort is used to be consistent with the Matlab implementation.
# Sort in PyTorch does not support bool types on CUDA (yet, 1.11.0)
dtype = torch.uint8 if det_scores.is_cuda and det_scores.dtype is torch.bool else det_scores.dtype
# Explicitly cast to uint8 to avoid error for bool inputs on CUDA to argsort
inds = torch.argsort(det_scores.to(dtype), descending=True)
det_scores_sorted = det_scores[inds]
det_matches = torch.cat([e["dtMatches"][:, :max_det] for e in img_eval_cls_bbox], axis=1)[:, inds]
det_ignore = torch.cat([e["dtIgnore"][:, :max_det] for e in img_eval_cls_bbox], axis=1)[:, inds]
gt_ignore = torch.cat([e["gtIgnore"] for e in img_eval_cls_bbox])
npig = torch.count_nonzero(gt_ignore == False) # noqa: E712
if npig == 0:
return recall, precision, scores
tps = torch.logical_and(det_matches, torch.logical_not(det_ignore))
fps = torch.logical_and(torch.logical_not(det_matches), torch.logical_not(det_ignore))
tp_sum = _cumsum(tps, dim=1, dtype=torch.float)
fp_sum = _cumsum(fps, dim=1, dtype=torch.float)
for idx, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp_len = len(tp)
rc = tp / npig
pr = tp / (fp + tp + torch.finfo(torch.float64).eps)
prec = torch.zeros((num_rec_thrs,))
score = torch.zeros((num_rec_thrs,))
recall[idx, idx_cls, idx_bbox_area, idx_max_det_thrs] = rc[-1] if tp_len else 0
# Remove zigzags for AUC
diff_zero = torch.zeros((1,), device=pr.device)
diff = torch.ones((1,), device=pr.device)
while not torch.all(diff == 0):
diff = torch.clamp(torch.cat(((pr[1:] - pr[:-1]), diff_zero), 0), min=0)
pr += diff
inds = torch.searchsorted(rc, rec_thresholds.to(rc.device), right=False)
num_inds = inds.argmax() if inds.max() >= tp_len else num_rec_thrs
inds = inds[:num_inds]
prec[:num_inds] = pr[inds]
score[:num_inds] = det_scores_sorted[inds]
precision[idx, :, idx_cls, idx_bbox_area, idx_max_det_thrs] = prec
scores[idx, :, idx_cls, idx_bbox_area, idx_max_det_thrs] = score
return recall, precision, scores
def compute(self) -> dict:
"""Compute metric."""
classes = self._get_classes()
precisions, recalls = self._calculate(classes)
map_val, mar_val = self._summarize_results(precisions, recalls)
# if class mode is enabled, evaluate metrics per class
map_per_class_values: Tensor = torch.tensor([-1.0])
mar_max_dets_per_class_values: Tensor = torch.tensor([-1.0])
if self.class_metrics:
map_per_class_list = []
mar_max_dets_per_class_list = []
for class_idx, _ in enumerate(classes):
cls_precisions = precisions[:, :, class_idx].unsqueeze(dim=2)
cls_recalls = recalls[:, class_idx].unsqueeze(dim=1)
cls_map, cls_mar = self._summarize_results(cls_precisions, cls_recalls)
map_per_class_list.append(cls_map.map)
mar_max_dets_per_class_list.append(cls_mar[f"mar_{self.max_detection_thresholds[-1]}"])
map_per_class_values = torch.tensor(map_per_class_list, dtype=torch.float)
mar_max_dets_per_class_values = torch.tensor(mar_max_dets_per_class_list, dtype=torch.float)
metrics = COCOMetricResults()
metrics.update(map_val)
metrics.update(mar_val)
metrics.map_per_class = map_per_class_values
metrics[f"mar_{self.max_detection_thresholds[-1]}_per_class"] = mar_max_dets_per_class_values
metrics.classes = torch.tensor(classes, dtype=torch.int)
return metrics
def _apply(self, fn: Callable) -> torch.nn.Module:
"""Custom apply function.
Excludes the detections and groundtruths from the casting when the iou_type is set to `segm` as the state is
no longer a tensor but a tuple.
"""
if self.iou_type == "segm":
this = super()._apply(fn, exclude_state=("detections", "groundtruths"))
else:
this = super()._apply(fn)
return this
def _sync_dist(self, dist_sync_fn: Optional[Callable] = None, process_group: Optional[Any] = None) -> None:
"""Custom sync function.
For the iou_type `segm` the detections and groundtruths are no longer tensors but lists of tuples. Therefore,
we need to gather the per-process lists of tuples and then flatten them back into a single list of tuples.
"""
super()._sync_dist(dist_sync_fn=dist_sync_fn, process_group=process_group)
if self.iou_type == "segm":
self.detections = self._gather_tuple_list(self.detections, process_group)
self.groundtruths = self._gather_tuple_list(self.groundtruths, process_group)
@staticmethod
def _gather_tuple_list(list_to_gather: List[Tuple], process_group: Optional[Any] = None) -> List[Any]:
"""Gather a list of tuples over multiple devices."""
world_size = dist.get_world_size(group=process_group)
dist.barrier(group=process_group)
list_gathered = [None for _ in range(world_size)]
dist.all_gather_object(list_gathered, list_to_gather, group=process_group)
return [list_gathered[rank][idx] for idx in range(len(list_gathered[0])) for rank in range(world_size)]
def plot(
self, val: Optional[Union[Dict[str, Tensor], Sequence[Dict[str, Tensor]]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import tensor
>>> from torchmetrics.detection.mean_ap import MeanAveragePrecision
>>> preds = [dict(
... boxes=tensor([[258.0, 41.0, 606.0, 285.0]]),
... scores=tensor([0.536]),
... labels=tensor([0]),
... )]
>>> target = [dict(
... boxes=tensor([[214.0, 41.0, 562.0, 285.0]]),
... labels=tensor([0]),
... )]
>>> metric = MeanAveragePrecision()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.detection.mean_ap import MeanAveragePrecision
>>> preds = lambda: [dict(
... boxes=torch.tensor([[258.0, 41.0, 606.0, 285.0]]) + torch.randint(10, (1,4)),
... scores=torch.tensor([0.536]) + 0.1*torch.rand(1),
... labels=torch.tensor([0]),
... )]
>>> target = [dict(
... boxes=torch.tensor([[214.0, 41.0, 562.0, 285.0]]),
... labels=torch.tensor([0]),
... )]
>>> metric = MeanAveragePrecision()
>>> vals = []
>>> for _ in range(20):
... vals.append(metric(preds(), target))
>>> fig_, ax_ = metric.plot(vals)
"""
return self._plot(val, ax)
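# Illustrative sketch (not part of the original source): with ``class_metrics=True`` the output dictionary
# additionally contains per-class values under ``map_per_class`` and ``mar_100_per_class`` (the latter key
# follows the last entry of ``max_detection_thresholds``). The helper name and the toy boxes below are made up
# for demonstration purposes only.
def _example_per_class_map() -> dict:
    preds = [
        dict(boxes=torch.tensor([[258.0, 41.0, 606.0, 285.0]]), scores=torch.tensor([0.536]), labels=torch.tensor([0]))
    ]
    target = [dict(boxes=torch.tensor([[214.0, 41.0, 562.0, 285.0]]), labels=torch.tensor([0]))]
    metric = MeanAveragePrecision(class_metrics=True)
    metric.update(preds, target)
    result = metric.compute()
    return {"map_per_class": result["map_per_class"], "mar_100_per_class": result["mar_100_per_class"]}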
# File: public_repos/torchmetrics/src/torchmetrics/detection/panoptic_qualities.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Collection, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.functional.detection._panoptic_quality_common import (
_get_category_id_to_continuous_id,
_get_void_color,
_panoptic_quality_compute,
_panoptic_quality_update,
_parse_categories,
_prepocess_inputs,
_validate_inputs,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["PanopticQuality.plot", "ModifiedPanopticQuality.plot"]
class PanopticQuality(Metric):
r"""Compute the `Panoptic Quality`_ for panoptic segmentations.
.. math::
PQ = \frac{IOU}{TP + 0.5 FP + 0.5 FN}
where IOU, TP, FP and FN are respectively the sum of the intersection over union for true positives,
the number of true positives, false positives and false negatives. This metric is inspired by the PQ
implementation of panopticapi, a standard implementation for the PQ metric for panoptic segmentation.
.. note::
Points in the target tensor that do not map to a known category ID are automatically ignored in the metric
computation.
Args:
things:
Set of ``category_id`` for countable things.
stuffs:
Set of ``category_id`` for uncountable stuffs.
allow_unknown_preds_category:
Boolean flag to specify if unknown categories in the predictions are to be ignored in the metric
computation or raise an exception when found.
Raises:
ValueError:
If ``things``, ``stuffs`` have at least one common ``category_id``.
TypeError:
If ``things``, ``stuffs`` contain non-integer ``category_id``.
Example:
>>> from torch import tensor
>>> from torchmetrics.detection import PanopticQuality
>>> preds = tensor([[[[6, 0], [0, 0], [6, 0], [6, 0]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [7, 0], [6, 0], [1, 0]],
... [[0, 0], [7, 0], [7, 0], [7, 0]]]])
>>> target = tensor([[[[6, 0], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [1, 0]],
... [[0, 1], [7, 0], [1, 0], [1, 0]],
... [[0, 1], [7, 0], [7, 0], [7, 0]]]])
>>> panoptic_quality = PanopticQuality(things = {0, 1}, stuffs = {6, 7})
>>> panoptic_quality(preds, target)
tensor(0.5463, dtype=torch.float64)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
iou_sum: Tensor
true_positives: Tensor
false_positives: Tensor
false_negatives: Tensor
def __init__(
self,
things: Collection[int],
stuffs: Collection[int],
allow_unknown_preds_category: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
things, stuffs = _parse_categories(things, stuffs)
self.things = things
self.stuffs = stuffs
self.void_color = _get_void_color(things, stuffs)
self.cat_id_to_continuous_id = _get_category_id_to_continuous_id(things, stuffs)
self.allow_unknown_preds_category = allow_unknown_preds_category
# per category intermediate metrics
num_categories = len(things) + len(stuffs)
self.add_state("iou_sum", default=torch.zeros(num_categories, dtype=torch.double), dist_reduce_fx="sum")
self.add_state("true_positives", default=torch.zeros(num_categories, dtype=torch.int), dist_reduce_fx="sum")
self.add_state("false_positives", default=torch.zeros(num_categories, dtype=torch.int), dist_reduce_fx="sum")
self.add_state("false_negatives", default=torch.zeros(num_categories, dtype=torch.int), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
r"""Update state with predictions and targets.
Args:
preds: panoptic detection of shape ``[batch, *spatial_dims, 2]`` containing
the pair ``(category_id, instance_id)`` for each point.
If the ``category_id`` refers to a stuff, the ``instance_id`` is ignored.
target: ground truth of shape ``[batch, *spatial_dims, 2]`` containing
the pair ``(category_id, instance_id)`` for each pixel of the image.
If the ``category_id`` refers to a stuff, the ``instance_id`` is ignored.
Raises:
TypeError:
If ``preds`` or ``target`` is not a ``torch.Tensor``.
ValueError:
If ``preds`` and ``target`` have different shape.
ValueError:
If ``preds`` has less than 3 dimensions.
ValueError:
If the final dimension of ``preds`` has size != 2.
"""
_validate_inputs(preds, target)
flatten_preds = _prepocess_inputs(
self.things, self.stuffs, preds, self.void_color, self.allow_unknown_preds_category
)
flatten_target = _prepocess_inputs(self.things, self.stuffs, target, self.void_color, True)
iou_sum, true_positives, false_positives, false_negatives = _panoptic_quality_update(
flatten_preds, flatten_target, self.cat_id_to_continuous_id, self.void_color
)
self.iou_sum += iou_sum
self.true_positives += true_positives
self.false_positives += false_positives
self.false_negatives += false_negatives
def compute(self) -> Tensor:
"""Compute panoptic quality based on inputs passed in to ``update`` previously."""
return _panoptic_quality_compute(self.iou_sum, self.true_positives, self.false_positives, self.false_negatives)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import tensor
>>> from torchmetrics.detection import PanopticQuality
>>> preds = tensor([[[[6, 0], [0, 0], [6, 0], [6, 0]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [7, 0], [6, 0], [1, 0]],
... [[0, 0], [7, 0], [7, 0], [7, 0]]]])
>>> target = tensor([[[[6, 0], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [1, 0]],
... [[0, 1], [7, 0], [1, 0], [1, 0]],
... [[0, 1], [7, 0], [7, 0], [7, 0]]]])
>>> metric = PanopticQuality(things = {0, 1}, stuffs = {6, 7})
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import tensor
>>> from torchmetrics.detection import PanopticQuality
>>> preds = tensor([[[[6, 0], [0, 0], [6, 0], [6, 0]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [7, 0], [6, 0], [1, 0]],
... [[0, 0], [7, 0], [7, 0], [7, 0]]]])
>>> target = tensor([[[[6, 0], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [1, 0]],
... [[0, 1], [7, 0], [1, 0], [1, 0]],
... [[0, 1], [7, 0], [7, 0], [7, 0]]]])
>>> metric = PanopticQuality(things = {0, 1}, stuffs = {6, 7})
>>> vals = []
>>> for _ in range(20):
... vals.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(vals)
"""
return self._plot(val, ax)
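# Illustrative sketch (not part of the original source): ``update`` expects the last dimension to hold the pair
# ``(category_id, instance_id)`` per pixel. The hypothetical helper below shows how separate per-pixel category
# and instance maps of shape ``[batch, height, width]`` could be stacked into the expected
# ``[batch, height, width, 2]`` layout.
def _example_pack_panoptic_input(category_map: Tensor, instance_map: Tensor) -> Tensor:
    return torch.stack((category_map, instance_map), dim=-1)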
class ModifiedPanopticQuality(Metric):
r"""Compute `Modified Panoptic Quality`_ for panoptic segmentations.
The metric was introduced in `Seamless Scene Segmentation paper`_, and is an adaptation of the original
`Panoptic Quality`_ where the metric for a stuff class is computed as
.. math::
PQ^{\dagger}_c = \frac{IOU_c}{|S_c|}
where :math:`IOU_c` is the sum of the intersection over union of all matching segments for a given class, and
:math:`|S_c|` is the overall number of segments in the ground truth for that class.
.. note::
Points in the target tensor that do not map to a known category ID are automatically ignored in the metric
computation.
Args:
things:
Set of ``category_id`` for countable things.
stuffs:
Set of ``category_id`` for uncountable stuffs.
allow_unknown_preds_category:
Boolean flag to specify if unknown categories in the predictions are to be ignored in the metric
computation or raise an exception when found.
Raises:
ValueError:
If ``things``, ``stuffs`` have at least one common ``category_id``.
TypeError:
If ``things``, ``stuffs`` contain non-integer ``category_id``.
Example:
>>> from torch import tensor
>>> from torchmetrics.detection import ModifiedPanopticQuality
>>> preds = tensor([[[0, 0], [0, 1], [6, 0], [7, 0], [0, 2], [1, 0]]])
>>> target = tensor([[[0, 1], [0, 0], [6, 0], [7, 0], [6, 0], [255, 0]]])
>>> pq_modified = ModifiedPanopticQuality(things = {0, 1}, stuffs = {6, 7})
>>> pq_modified(preds, target)
tensor(0.7667, dtype=torch.float64)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
iou_sum: Tensor
true_positives: Tensor
false_positives: Tensor
false_negatives: Tensor
def __init__(
self,
things: Collection[int],
stuffs: Collection[int],
allow_unknown_preds_category: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
things, stuffs = _parse_categories(things, stuffs)
self.things = things
self.stuffs = stuffs
self.void_color = _get_void_color(things, stuffs)
self.cat_id_to_continuous_id = _get_category_id_to_continuous_id(things, stuffs)
self.allow_unknown_preds_category = allow_unknown_preds_category
# per category intermediate metrics
num_categories = len(things) + len(stuffs)
self.add_state("iou_sum", default=torch.zeros(num_categories, dtype=torch.double), dist_reduce_fx="sum")
self.add_state("true_positives", default=torch.zeros(num_categories, dtype=torch.int), dist_reduce_fx="sum")
self.add_state("false_positives", default=torch.zeros(num_categories, dtype=torch.int), dist_reduce_fx="sum")
self.add_state("false_negatives", default=torch.zeros(num_categories, dtype=torch.int), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
r"""Update state with predictions and targets.
Args:
preds: panoptic detection of shape ``[batch, *spatial_dims, 2]`` containing
the pair ``(category_id, instance_id)`` for each point.
If the ``category_id`` refers to a stuff, the ``instance_id`` is ignored.
target: ground truth of shape ``[batch, *spatial_dims, 2]`` containing
the pair ``(category_id, instance_id)`` for each pixel of the image.
If the ``category_id`` refers to a stuff, the ``instance_id`` is ignored.
Raises:
TypeError:
If ``preds`` or ``target`` is not a ``torch.Tensor``.
ValueError:
If ``preds`` and ``target`` have different shape.
ValueError:
If ``preds`` has less than 3 dimensions.
ValueError:
If the final dimension of ``preds`` has size != 2.
"""
_validate_inputs(preds, target)
flatten_preds = _prepocess_inputs(
self.things, self.stuffs, preds, self.void_color, self.allow_unknown_preds_category
)
flatten_target = _prepocess_inputs(self.things, self.stuffs, target, self.void_color, True)
iou_sum, true_positives, false_positives, false_negatives = _panoptic_quality_update(
flatten_preds,
flatten_target,
self.cat_id_to_continuous_id,
self.void_color,
modified_metric_stuffs=self.stuffs,
)
self.iou_sum += iou_sum
self.true_positives += true_positives
self.false_positives += false_positives
self.false_negatives += false_negatives
def compute(self) -> Tensor:
"""Compute panoptic quality based on inputs passed in to ``update`` previously."""
return _panoptic_quality_compute(self.iou_sum, self.true_positives, self.false_positives, self.false_negatives)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import tensor
>>> from torchmetrics.detection import ModifiedPanopticQuality
>>> preds = tensor([[[[6, 0], [0, 0], [6, 0], [6, 0]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [7, 0], [6, 0], [1, 0]],
... [[0, 0], [7, 0], [7, 0], [7, 0]]]])
>>> target = tensor([[[[6, 0], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [1, 0]],
... [[0, 1], [7, 0], [1, 0], [1, 0]],
... [[0, 1], [7, 0], [7, 0], [7, 0]]]])
>>> metric = ModifiedPanopticQuality(things = {0, 1}, stuffs = {6, 7})
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import tensor
>>> from torchmetrics.detection import ModifiedPanopticQuality
>>> preds = tensor([[[[6, 0], [0, 0], [6, 0], [6, 0]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [7, 0], [6, 0], [1, 0]],
... [[0, 0], [7, 0], [7, 0], [7, 0]]]])
>>> target = tensor([[[[6, 0], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [1, 0]],
... [[0, 1], [7, 0], [1, 0], [1, 0]],
... [[0, 1], [7, 0], [7, 0], [7, 0]]]])
>>> metric = ModifiedPanopticQuality(things = {0, 1}, stuffs = {6, 7})
>>> vals = []
>>> for _ in range(20):
... vals.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(vals)
"""
return self._plot(val, ax)
# File: public_repos/torchmetrics/src/torchmetrics/detection/_deprecated.py
from typing import Any, Collection
from torchmetrics.detection import ModifiedPanopticQuality, PanopticQuality
from torchmetrics.utilities.prints import _deprecated_root_import_class
class _ModifiedPanopticQuality(ModifiedPanopticQuality):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([[[0, 0], [0, 1], [6, 0], [7, 0], [0, 2], [1, 0]]])
>>> target = tensor([[[0, 1], [0, 0], [6, 0], [7, 0], [6, 0], [255, 0]]])
>>> pq_modified = _ModifiedPanopticQuality(things = {0, 1}, stuffs = {6, 7})
>>> pq_modified(preds, target)
tensor(0.7667, dtype=torch.float64)
"""
def __init__(
self,
things: Collection[int],
stuffs: Collection[int],
allow_unknown_preds_category: bool = False,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("ModifiedPanopticQuality", "detection")
super().__init__(
things=things, stuffs=stuffs, allow_unknown_preds_category=allow_unknown_preds_category, **kwargs
)
class _PanopticQuality(PanopticQuality):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([[[[6, 0], [0, 0], [6, 0], [6, 0]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [7, 0], [6, 0], [1, 0]],
... [[0, 0], [7, 0], [7, 0], [7, 0]]]])
>>> target = tensor([[[[6, 0], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [1, 0]],
... [[0, 1], [7, 0], [1, 0], [1, 0]],
... [[0, 1], [7, 0], [7, 0], [7, 0]]]])
>>> panoptic_quality = _PanopticQuality(things = {0, 1}, stuffs = {6, 7})
>>> panoptic_quality(preds, target)
tensor(0.5463, dtype=torch.float64)
"""
def __init__(
self,
things: Collection[int],
stuffs: Collection[int],
allow_unknown_preds_category: bool = False,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("PanopticQuality", "detection")
super().__init__(
things=things, stuffs=stuffs, allow_unknown_preds_category=allow_unknown_preds_category, **kwargs
)
# File: public_repos/torchmetrics/src/torchmetrics/detection/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.detection.panoptic_qualities import ModifiedPanopticQuality, PanopticQuality
from torchmetrics.utilities.imports import (
_TORCHVISION_GREATER_EQUAL_0_8,
_TORCHVISION_GREATER_EQUAL_0_13,
)
__all__ = ["ModifiedPanopticQuality", "PanopticQuality"]
if _TORCHVISION_GREATER_EQUAL_0_8:
from torchmetrics.detection.giou import GeneralizedIntersectionOverUnion
from torchmetrics.detection.iou import IntersectionOverUnion
from torchmetrics.detection.mean_ap import MeanAveragePrecision
__all__ += ["MeanAveragePrecision", "GeneralizedIntersectionOverUnion", "IntersectionOverUnion"]
if _TORCHVISION_GREATER_EQUAL_0_13:
from torchmetrics.detection.ciou import CompleteIntersectionOverUnion
from torchmetrics.detection.diou import DistanceIntersectionOverUnion
__all__ += ["CompleteIntersectionOverUnion", "DistanceIntersectionOverUnion"]
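# Illustrative sketch (not part of the original source): since the IoU-based metrics above are only exported
# when a new enough ``torchvision`` is available, downstream code that wants to degrade gracefully can guard
# the import, for example:
#
#     try:
#         from torchmetrics.detection import MeanAveragePrecision
#     except ImportError:
#         MeanAveragePrecision = None  # torchvision missing or older than 0.8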
# File: public_repos/torchmetrics/src/torchmetrics/detection/mean_ap.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import io
import json
from types import ModuleType
from typing import Any, Callable, ClassVar, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from lightning_utilities import apply_to_collection
from torch import Tensor
from torch import distributed as dist
from typing_extensions import Literal
from torchmetrics.detection.helpers import _fix_empty_tensors, _input_validator, _validate_iou_type_arg
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.imports import (
_FASTER_COCO_EVAL_AVAILABLE,
_MATPLOTLIB_AVAILABLE,
_PYCOCOTOOLS_AVAILABLE,
_TORCHVISION_GREATER_EQUAL_0_8,
)
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MeanAveragePrecision.plot"]
if not _TORCHVISION_GREATER_EQUAL_0_8 or not (_PYCOCOTOOLS_AVAILABLE or _FASTER_COCO_EVAL_AVAILABLE):
__doctest_skip__ = [
"MeanAveragePrecision.plot",
"MeanAveragePrecision",
"MeanAveragePrecision.tm_to_coco",
"MeanAveragePrecision.coco_to_tm",
]
def _load_backend_tools(backend: Literal["pycocotools", "faster_coco_eval"]) -> Tuple[object, object, ModuleType]:
"""Load the backend tools for the given backend."""
if backend == "pycocotools":
if not _PYCOCOTOOLS_AVAILABLE:
raise ModuleNotFoundError(
"Backend `pycocotools` in metric `MeanAveragePrecision` requires that `pycocotools` is"
" installed. Please install with `pip install pycocotools` or `pip install torchmetrics[detection]`"
)
import pycocotools.mask as mask_utils
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
return COCO, COCOeval, mask_utils
if not _FASTER_COCO_EVAL_AVAILABLE:
raise ModuleNotFoundError(
"Backend `faster_coco_eval` in metric `MeanAveragePrecision` requires that `faster-coco-eval` is"
" installed. Please install with `pip install faster-coco-eval`."
)
from faster_coco_eval import COCO
from faster_coco_eval import COCOeval_faster as COCOeval
from faster_coco_eval.core import mask as mask_utils
return COCO, COCOeval, mask_utils
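# Illustrative sketch (not part of the original source): ``_load_backend_tools`` lets the rest of the metric
# stay backend agnostic by returning the dataset class, the evaluator class and the mask utilities of the
# requested backend. The helper name below is hypothetical and only serves as a demonstration.
def _example_backend_roundtrip(backend: Literal["pycocotools", "faster_coco_eval"] = "pycocotools") -> bool:
    coco_cls, cocoeval_cls, mask_utils = _load_backend_tools(backend)
    # for "pycocotools" these are pycocotools.coco.COCO, pycocotools.cocoeval.COCOeval and pycocotools.mask
    return callable(coco_cls) and callable(cocoeval_cls) and hasattr(mask_utils, "iou")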
class MeanAveragePrecision(Metric):
r"""Compute the `Mean-Average-Precision (mAP) and Mean-Average-Recall (mAR)`_ for object detection predictions.
.. math::
\text{mAP} = \frac{1}{n} \sum_{i=1}^{n} AP_i
where :math:`AP_i` is the average precision for class :math:`i` and :math:`n` is the number of classes. The average
precision is defined as the area under the precision-recall curve. For object detection the recall and precision are
defined based on the intersection over union (IoU) between the predicted bounding boxes and the ground truth bounding
boxes, e.g. if two boxes have an IoU > t (with t being some threshold) they are considered a match and therefore
a true positive. The precision is then defined as the number of true positives divided by the number of
all detected boxes and the recall is defined as the number of true positives divided by the number of all ground
truth boxes.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes``
detection boxes of the format specified in the constructor.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates, but can be changed
using the ``box_format`` parameter. Only required when `iou_type="bbox"`.
- ``scores`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes)`` containing detection scores for the
boxes.
- ``labels`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0-indexed detection
classes for the boxes.
- ``masks`` (:class:`~torch.Tensor`): boolean tensor of shape ``(num_boxes, image_height, image_width)``
containing boolean masks. Only required when `iou_type="segm"`.
- ``target`` (:class:`~List`): A list consisting of dictionaries each containing the key-values
(each dictionary corresponds to a single image). Parameters that should be provided per dict:
- ``boxes`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes, 4)`` containing ``num_boxes`` ground
truth boxes of the format specified in the constructor. Only required when `iou_type="bbox"`.
By default, this method expects ``(xmin, ymin, xmax, ymax)`` in absolute image coordinates.
- ``labels`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0-indexed ground truth
classes for the boxes.
- ``masks`` (:class:`~torch.Tensor`): boolean tensor of shape ``(num_boxes, image_height, image_width)``
containing boolean masks. Only required when `iou_type="segm"`.
- ``iscrowd`` (:class:`~torch.Tensor`): integer tensor of shape ``(num_boxes)`` containing 0/1 values indicating
whether the bounding box/masks indicate a crowd of objects. Value is optional, and if not provided it will
automatically be set to 0.
- ``area`` (:class:`~torch.Tensor`): float tensor of shape ``(num_boxes)`` containing the area of the object.
Value is optional, and if not provided will be automatically calculated based on the bounding box/masks
provided. Only affects which samples contribute to the `map_small`, `map_medium`, `map_large` values
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``map_dict``: A dictionary containing the following key-values:
- map: (:class:`~torch.Tensor`), global mean average precision
- map_small: (:class:`~torch.Tensor`), mean average precision for small objects
- map_medium: (:class:`~torch.Tensor`), mean average precision for medium objects
- map_large: (:class:`~torch.Tensor`), mean average precision for large objects
- mar_1: (:class:`~torch.Tensor`), mean average recall for 1 detection per image
- mar_10: (:class:`~torch.Tensor`), mean average recall for 10 detections per image
- mar_100: (:class:`~torch.Tensor`), mean average recall for 100 detections per image
- mar_small: (:class:`~torch.Tensor`), mean average recall for small objects
- mar_medium: (:class:`~torch.Tensor`), mean average recall for medium objects
- mar_large: (:class:`~torch.Tensor`), mean average recall for large objects
- map_50: (:class:`~torch.Tensor`) (-1 if 0.5 not in the list of iou thresholds), mean average precision at
IoU=0.50
- map_75: (:class:`~torch.Tensor`) (-1 if 0.75 not in the list of iou thresholds), mean average precision at
IoU=0.75
- map_per_class: (:class:`~torch.Tensor`) (-1 if class metrics are disabled), mean average precision per
observed class
- mar_100_per_class: (:class:`~torch.Tensor`) (-1 if class metrics are disabled), mean average recall for 100
detections per image per observed class
- classes (:class:`~torch.Tensor`), list of all observed classes
For an example on how to use this metric check the `torchmetrics mAP example`_.
.. note::
``map`` score is calculated with @[ IoU=self.iou_thresholds | area=all | max_dets=max_detection_thresholds ].
Caution: If the initialization parameters are changed, dictionary keys for mAR can change as well.
.. note::
This metric supports, at the moment, two different backends for the evaluation. The default backend is
``"pycocotools"``, which requires either the official `pycocotools`_ implementation or this
`fork of pycocotools`_ to be installed. We recommend using the fork as it is better maintained and easily
available to install via pip: `pip install pycocotools`. It is also this fork that will be installed if you
install ``torchmetrics[detection]``. The second backend is the `faster-coco-eval`_ implementation, which can be
installed with ``pip install faster-coco-eval``. It is a maintained open-source implementation that is faster
and corrects certain corner cases present in the official implementation. Our own testing has shown
that the results are identical to the official implementation. Regardless of the backend we also require you to
have `torchvision` version 0.8.0 or newer installed. Please install with ``pip install torchvision>=0.8`` or
``pip install torchmetrics[detection]``.
Args:
box_format:
Input format of given boxes. Supported formats are:
- 'xyxy': boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right.
- 'xywh': boxes are represented via corner, width and height, x1, y1 being top left, w, h being
width and height. This is the default format used by pycocotools and all input formats will be converted
to this.
- 'cxcywh': boxes are represented via centre, width and height, cx, cy being center of box, w, h being
width and height.
iou_type:
Type of input (either masks or bounding-boxes) used for computing IOU. Supported IOU types are
``"bbox"`` or ``"segm"`` or both as a tuple.
iou_thresholds:
IoU thresholds for evaluation. If set to ``None`` it corresponds to the stepped range ``[0.5,...,0.95]``
with step ``0.05``. Else provide a list of floats.
rec_thresholds:
Recall thresholds for evaluation. If set to ``None`` it corresponds to the stepped range ``[0,...,1]``
with step ``0.01``. Else provide a list of floats.
max_detection_thresholds:
Thresholds on max detections per image. If set to `None` will use thresholds ``[1, 10, 100]``.
Else, please provide a list of ints.
class_metrics:
Option to enable per-class metrics for mAP and mAR_100. Has a performance impact that scales linearly with
the number of classes in the dataset.
extended_summary:
Option to enable extended summary with additional metrics including IOU, precision and recall. The output
dictionary will contain the following extra key-values:
- ``ious``: a dictionary containing the IoU values for every image/class combination e.g.
``ious[(0,0)]`` would contain the IoU for image 0 and class 0. Each value is a tensor with shape
``(n,m)`` where ``n`` is the number of detections and ``m`` is the number of ground truth boxes for
that image/class combination.
- ``precision``: a tensor of shape ``(TxRxKxAxM)`` containing the precision values. Here ``T`` is the
number of IoU thresholds, ``R`` is the number of recall thresholds, ``K`` is the number of classes,
``A`` is the number of areas and ``M`` is the number of max detections per image.
- ``recall``: a tensor of shape ``(TxKxAxM)`` containing the recall values. Here ``T`` is the number of
IoU thresholds, ``K`` is the number of classes, ``A`` is the number of areas and ``M`` is the number
of max detections per image.
average:
Method for averaging scores over labels. Choose between ``"macro"`` and ``"micro"``.
backend:
Backend to use for the evaluation. Choose between ``"pycocotools"`` and ``"faster_coco_eval"``.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ModuleNotFoundError:
If ``pycocotools`` is not installed
ModuleNotFoundError:
If ``torchvision`` is not installed or version installed is lower than 0.8.0
ValueError:
If ``box_format`` is not one of ``"xyxy"``, ``"xywh"`` or ``"cxcywh"``
ValueError:
If ``iou_type`` is not one of ``"bbox"`` or ``"segm"``
ValueError:
If ``iou_thresholds`` is not None or a list of floats
ValueError:
If ``rec_thresholds`` is not None or a list of floats
ValueError:
If ``max_detection_thresholds`` is not None or a list of ints
ValueError:
If ``class_metrics`` is not a boolean
Example::
Basic example for when `iou_type="bbox"`. In this case the ``boxes`` key is required in the input dictionaries,
in addition to the ``scores`` and ``labels`` keys.
>>> from torch import tensor
>>> from torchmetrics.detection import MeanAveragePrecision
>>> preds = [
... dict(
... boxes=tensor([[258.0, 41.0, 606.0, 285.0]]),
... scores=tensor([0.536]),
... labels=tensor([0]),
... )
... ]
>>> target = [
... dict(
... boxes=tensor([[214.0, 41.0, 562.0, 285.0]]),
... labels=tensor([0]),
... )
... ]
>>> metric = MeanAveragePrecision(iou_type="bbox")
>>> metric.update(preds, target)
>>> from pprint import pprint
>>> pprint(metric.compute())
{'classes': tensor(0, dtype=torch.int32),
'map': tensor(0.6000),
'map_50': tensor(1.),
'map_75': tensor(1.),
'map_large': tensor(0.6000),
'map_medium': tensor(-1.),
'map_per_class': tensor(-1.),
'map_small': tensor(-1.),
'mar_1': tensor(0.6000),
'mar_10': tensor(0.6000),
'mar_100': tensor(0.6000),
'mar_100_per_class': tensor(-1.),
'mar_large': tensor(0.6000),
'mar_medium': tensor(-1.),
'mar_small': tensor(-1.)}
Example::
Basic example for when `iou_type="segm"`. In this case the ``masks`` key is required in the input dictionaries,
in addition to the ``scores`` and ``labels`` keys.
>>> from torch import tensor
>>> from torchmetrics.detection import MeanAveragePrecision
>>> mask_pred = [
... [0, 0, 0, 0, 0],
... [0, 0, 1, 1, 0],
... [0, 0, 1, 1, 0],
... [0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0],
... ]
>>> mask_tgt = [
... [0, 0, 0, 0, 0],
... [0, 0, 1, 0, 0],
... [0, 0, 1, 1, 0],
... [0, 0, 1, 0, 0],
... [0, 0, 0, 0, 0],
... ]
>>> preds = [
... dict(
... masks=tensor([mask_pred], dtype=torch.bool),
... scores=tensor([0.536]),
... labels=tensor([0]),
... )
... ]
>>> target = [
... dict(
... masks=tensor([mask_tgt], dtype=torch.bool),
... labels=tensor([0]),
... )
... ]
>>> metric = MeanAveragePrecision(iou_type="segm")
>>> metric.update(preds, target)
>>> from pprint import pprint
>>> pprint(metric.compute())
{'classes': tensor(0, dtype=torch.int32),
'map': tensor(0.2000),
'map_50': tensor(1.),
'map_75': tensor(0.),
'map_large': tensor(-1.),
'map_medium': tensor(-1.),
'map_per_class': tensor(-1.),
'map_small': tensor(0.2000),
'mar_1': tensor(0.2000),
'mar_10': tensor(0.2000),
'mar_100': tensor(0.2000),
'mar_100_per_class': tensor(-1.),
'mar_large': tensor(-1.),
'mar_medium': tensor(-1.),
'mar_small': tensor(0.2000)}
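Example::
A hedged sketch of enabling per-class results via ``class_metrics=True`` (see the ``class_metrics`` argument
above). The exact values depend on the data and the chosen backend, so the final computation is skipped here.
>>> from torch import tensor
>>> from torchmetrics.detection import MeanAveragePrecision
>>> preds = [
... dict(
... boxes=tensor([[258.0, 41.0, 606.0, 285.0]]),
... scores=tensor([0.536]),
... labels=tensor([0]),
... )
... ]
>>> target = [
... dict(
... boxes=tensor([[214.0, 41.0, 562.0, 285.0]]),
... labels=tensor([0]),
... )
... ]
>>> metric = MeanAveragePrecision(iou_type="bbox", class_metrics=True)
>>> metric.update(preds, target)
>>> result = metric.compute()  # doctest: +SKIP
>>> result["map_per_class"], result["mar_100_per_class"]  # doctest: +SKIP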
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
detection_box: List[Tensor]
detection_mask: List[Tensor]
detection_scores: List[Tensor]
detection_labels: List[Tensor]
groundtruth_box: List[Tensor]
groundtruth_mask: List[Tensor]
groundtruth_labels: List[Tensor]
groundtruth_crowds: List[Tensor]
groundtruth_area: List[Tensor]
warn_on_many_detections: bool = True
__jit_unused_properties__: ClassVar[List[str]] = [
"is_differentiable",
"higher_is_better",
"plot_lower_bound",
"plot_upper_bound",
"plot_legend_name",
"metric_state",
"_update_called",
# below is added for specifically for this metric
"coco",
"cocoeval",
"mask_utils",
]
def __init__(
self,
box_format: Literal["xyxy", "xywh", "cxcywh"] = "xyxy",
iou_type: Union[Literal["bbox", "segm"], Tuple[str]] = "bbox",
iou_thresholds: Optional[List[float]] = None,
rec_thresholds: Optional[List[float]] = None,
max_detection_thresholds: Optional[List[int]] = None,
class_metrics: bool = False,
extended_summary: bool = False,
average: Literal["macro", "micro"] = "macro",
backend: Literal["pycocotools", "faster_coco_eval"] = "pycocotools",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not (_PYCOCOTOOLS_AVAILABLE or _FASTER_COCO_EVAL_AVAILABLE):
raise ModuleNotFoundError(
"`MAP` metric requires that `pycocotools` or `faster-coco-eval` installed."
" Please install with `pip install pycocotools` or `pip install faster-coco-eval` or"
" `pip install torchmetrics[detection]`."
)
if not _TORCHVISION_GREATER_EQUAL_0_8:
raise ModuleNotFoundError(
"`MeanAveragePrecision` metric requires that `torchvision` version 0.8.0 or newer is installed."
" Please install with `pip install torchvision>=0.8` or `pip install torchmetrics[detection]`."
)
allowed_box_formats = ("xyxy", "xywh", "cxcywh")
if box_format not in allowed_box_formats:
raise ValueError(f"Expected argument `box_format` to be one of {allowed_box_formats} but got {box_format}")
self.box_format = box_format
self.iou_type = _validate_iou_type_arg(iou_type)
if iou_thresholds is not None and not isinstance(iou_thresholds, list):
raise ValueError(
f"Expected argument `iou_thresholds` to either be `None` or a list of floats but got {iou_thresholds}"
)
self.iou_thresholds = iou_thresholds or torch.linspace(0.5, 0.95, round((0.95 - 0.5) / 0.05) + 1).tolist()
if rec_thresholds is not None and not isinstance(rec_thresholds, list):
raise ValueError(
f"Expected argument `rec_thresholds` to either be `None` or a list of floats but got {rec_thresholds}"
)
self.rec_thresholds = rec_thresholds or torch.linspace(0.0, 1.00, round(1.00 / 0.01) + 1).tolist()
if max_detection_thresholds is not None and not isinstance(max_detection_thresholds, list):
raise ValueError(
f"Expected argument `max_detection_thresholds` to either be `None` or a list of ints"
f" but got {max_detection_thresholds}"
)
max_det_thr, _ = torch.sort(torch.tensor(max_detection_thresholds or [1, 10, 100], dtype=torch.int))
self.max_detection_thresholds = max_det_thr.tolist()
if not isinstance(class_metrics, bool):
raise ValueError("Expected argument `class_metrics` to be a boolean")
self.class_metrics = class_metrics
if not isinstance(extended_summary, bool):
raise ValueError("Expected argument `extended_summary` to be a boolean")
self.extended_summary = extended_summary
if average not in ("macro", "micro"):
raise ValueError(f"Expected argument `average` to be one of ('macro', 'micro') but got {average}")
self.average = average
if backend not in ("pycocotools", "faster_coco_eval"):
raise ValueError(
f"Expected argument `backend` to be one of ('pycocotools', 'faster_coco_eval') but got {backend}"
)
self.backend = backend
self.add_state("detection_box", default=[], dist_reduce_fx=None)
self.add_state("detection_mask", default=[], dist_reduce_fx=None)
self.add_state("detection_scores", default=[], dist_reduce_fx=None)
self.add_state("detection_labels", default=[], dist_reduce_fx=None)
self.add_state("groundtruth_box", default=[], dist_reduce_fx=None)
self.add_state("groundtruth_mask", default=[], dist_reduce_fx=None)
self.add_state("groundtruth_labels", default=[], dist_reduce_fx=None)
self.add_state("groundtruth_crowds", default=[], dist_reduce_fx=None)
self.add_state("groundtruth_area", default=[], dist_reduce_fx=None)
@property
def coco(self) -> object:
"""Returns the coco module for the given backend, done in this way to make metric picklable."""
coco, _, _ = _load_backend_tools(self.backend)
return coco
@property
def cocoeval(self) -> object:
"""Returns the coco eval module for the given backend, done in this way to make metric picklable."""
_, cocoeval, _ = _load_backend_tools(self.backend)
return cocoeval
@property
def mask_utils(self) -> object:
"""Returns the mask utils object for the given backend, done in this way to make metric picklable."""
_, _, mask_utils = _load_backend_tools(self.backend)
return mask_utils
def update(self, preds: List[Dict[str, Tensor]], target: List[Dict[str, Tensor]]) -> None:
"""Update metric state.
Raises:
ValueError:
If ``preds`` is not of type (:class:`~List[Dict[str, Tensor]]`)
ValueError:
If ``target`` is not of type ``List[Dict[str, Tensor]]``
ValueError:
If ``preds`` and ``target`` are not of the same length
ValueError:
If any of ``preds.boxes``, ``preds.scores`` and ``preds.labels`` are not of the same length
ValueError:
If any of ``target.boxes`` and ``target.labels`` are not of the same length
ValueError:
If any box is not type float and of length 4
ValueError:
If any class is not type int and of length 1
ValueError:
If any score is not type float and of length 1
"""
_input_validator(preds, target, iou_type=self.iou_type)
for item in preds:
bbox_detection, mask_detection = self._get_safe_item_values(item, warn=self.warn_on_many_detections)
if bbox_detection is not None:
self.detection_box.append(bbox_detection)
if mask_detection is not None:
self.detection_mask.append(mask_detection)
self.detection_labels.append(item["labels"])
self.detection_scores.append(item["scores"])
for item in target:
bbox_groundtruth, mask_groundtruth = self._get_safe_item_values(item)
if bbox_groundtruth is not None:
self.groundtruth_box.append(bbox_groundtruth)
if mask_groundtruth is not None:
self.groundtruth_mask.append(mask_groundtruth)
self.groundtruth_labels.append(item["labels"])
self.groundtruth_crowds.append(item.get("iscrowd", torch.zeros_like(item["labels"])))
self.groundtruth_area.append(item.get("area", torch.zeros_like(item["labels"])))
def compute(self) -> dict:
"""Computes the metric."""
coco_preds, coco_target = self._get_coco_datasets(average=self.average)
result_dict = {}
with contextlib.redirect_stdout(io.StringIO()):
for i_type in self.iou_type:
prefix = "" if len(self.iou_type) == 1 else f"{i_type}_"
if len(self.iou_type) > 1:
# the area calculation is different for bbox and segm and therefore to get the small, medium and
# large values correct we need to dynamically change the area attribute of the annotations
for anno in coco_preds.dataset["annotations"]:
anno["area"] = anno[f"area_{i_type}"]
coco_eval = self.cocoeval(coco_target, coco_preds, iouType=i_type)
coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
coco_eval.params.maxDets = self.max_detection_thresholds
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
stats = coco_eval.stats
result_dict.update(self._coco_stats_to_tensor_dict(stats, prefix=prefix))
summary = {}
if self.extended_summary:
summary = {
f"{prefix}ious": apply_to_collection(
coco_eval.ious, np.ndarray, lambda x: torch.tensor(x, dtype=torch.float32)
),
f"{prefix}precision": torch.tensor(coco_eval.eval["precision"]),
f"{prefix}recall": torch.tensor(coco_eval.eval["recall"]),
}
result_dict.update(summary)
# if class mode is enabled, evaluate metrics per class
if self.class_metrics:
if self.average == "micro":
# since micro averaging has all the data in one class, we need to reinitialize the coco_eval
# object in macro mode to get the per class stats
coco_preds, coco_target = self._get_coco_datasets(average="macro")
coco_eval = self.cocoeval(coco_target, coco_preds, iouType=i_type)
coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
coco_eval.params.maxDets = self.max_detection_thresholds
map_per_class_list = []
mar_100_per_class_list = []
for class_id in self._get_classes():
coco_eval.params.catIds = [class_id]
with contextlib.redirect_stdout(io.StringIO()):
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
class_stats = coco_eval.stats
map_per_class_list.append(torch.tensor([class_stats[0]]))
mar_100_per_class_list.append(torch.tensor([class_stats[8]]))
map_per_class_values = torch.tensor(map_per_class_list, dtype=torch.float32)
mar_100_per_class_values = torch.tensor(mar_100_per_class_list, dtype=torch.float32)
else:
map_per_class_values = torch.tensor([-1], dtype=torch.float32)
mar_100_per_class_values = torch.tensor([-1], dtype=torch.float32)
prefix = "" if len(self.iou_type) == 1 else f"{i_type}_"
result_dict.update(
{
f"{prefix}map_per_class": map_per_class_values,
f"{prefix}mar_100_per_class": mar_100_per_class_values,
},
)
result_dict.update({"classes": torch.tensor(self._get_classes(), dtype=torch.int32)})
return result_dict
def _get_coco_datasets(self, average: Literal["macro", "micro"]) -> Tuple[object, object]:
"""Returns the coco datasets for the target and the predictions."""
if average == "micro":
# for micro averaging we set everything to be the same class
groundtruth_labels = apply_to_collection(self.groundtruth_labels, Tensor, lambda x: torch.zeros_like(x))
detection_labels = apply_to_collection(self.detection_labels, Tensor, lambda x: torch.zeros_like(x))
else:
groundtruth_labels = self.groundtruth_labels
detection_labels = self.detection_labels
coco_target, coco_preds = self.coco(), self.coco()
coco_target.dataset = self._get_coco_format(
labels=groundtruth_labels,
boxes=self.groundtruth_box if len(self.groundtruth_box) > 0 else None,
masks=self.groundtruth_mask if len(self.groundtruth_mask) > 0 else None,
crowds=self.groundtruth_crowds,
area=self.groundtruth_area,
)
coco_preds.dataset = self._get_coco_format(
labels=detection_labels,
boxes=self.detection_box if len(self.detection_box) > 0 else None,
masks=self.detection_mask if len(self.detection_mask) > 0 else None,
scores=self.detection_scores,
)
with contextlib.redirect_stdout(io.StringIO()):
coco_target.createIndex()
coco_preds.createIndex()
return coco_preds, coco_target
@staticmethod
def _coco_stats_to_tensor_dict(stats: List[float], prefix: str) -> Dict[str, Tensor]:
"""Converts the output of COCOeval.stats to a dict of tensors."""
return {
f"{prefix}map": torch.tensor([stats[0]], dtype=torch.float32),
f"{prefix}map_50": torch.tensor([stats[1]], dtype=torch.float32),
f"{prefix}map_75": torch.tensor([stats[2]], dtype=torch.float32),
f"{prefix}map_small": torch.tensor([stats[3]], dtype=torch.float32),
f"{prefix}map_medium": torch.tensor([stats[4]], dtype=torch.float32),
f"{prefix}map_large": torch.tensor([stats[5]], dtype=torch.float32),
f"{prefix}mar_1": torch.tensor([stats[6]], dtype=torch.float32),
f"{prefix}mar_10": torch.tensor([stats[7]], dtype=torch.float32),
f"{prefix}mar_100": torch.tensor([stats[8]], dtype=torch.float32),
f"{prefix}mar_small": torch.tensor([stats[9]], dtype=torch.float32),
f"{prefix}mar_medium": torch.tensor([stats[10]], dtype=torch.float32),
f"{prefix}mar_large": torch.tensor([stats[11]], dtype=torch.float32),
}
@staticmethod
def coco_to_tm(
coco_preds: str,
coco_target: str,
iou_type: Union[Literal["bbox", "segm"], List[str]] = "bbox",
backend: Literal["pycocotools", "faster_coco_eval"] = "pycocotools",
) -> Tuple[List[Dict[str, Tensor]], List[Dict[str, Tensor]]]:
"""Utility function for converting .json coco format files to the input format of this metric.
The function accepts a file for the predictions and a file for the target in coco format and converts them to
a list of dictionaries containing the boxes, labels and scores in the input format of this metric.
Args:
coco_preds: Path to the json file containing the predictions in coco format
coco_target: Path to the json file containing the targets in coco format
iou_type: Type of input, either `bbox` for bounding boxes or `segm` for segmentation masks
backend: Backend to use for the conversion. Either `pycocotools` or `faster_coco_eval`.
Returns:
A tuple containing the predictions and targets in the input format of this metric. Each element of the
tuple is a list of dictionaries containing the boxes, labels and scores.
Example:
>>> # File formats are defined at https://cocodataset.org/#format-data
>>> # Example files can be found at
>>> # https://github.com/cocodataset/cocoapi/tree/master/results
>>> from torchmetrics.detection import MeanAveragePrecision
>>> preds, target = MeanAveragePrecision.coco_to_tm(
... "instances_val2014_fakebbox100_results.json.json",
... "val2014_fake_eval_res.txt.json"
... iou_type="bbox"
... ) # doctest: +SKIP
"""
iou_type = _validate_iou_type_arg(iou_type)
coco, _, _ = _load_backend_tools(backend)
with contextlib.redirect_stdout(io.StringIO()):
gt = coco(coco_target)
dt = gt.loadRes(coco_preds)
gt_dataset = gt.dataset["annotations"]
dt_dataset = dt.dataset["annotations"]
target = {}
for t in gt_dataset:
if t["image_id"] not in target:
target[t["image_id"]] = {
"labels": [],
"iscrowd": [],
"area": [],
}
if "bbox" in iou_type:
target[t["image_id"]]["boxes"] = []
if "segm" in iou_type:
target[t["image_id"]]["masks"] = []
if "bbox" in iou_type:
target[t["image_id"]]["boxes"].append(t["bbox"])
if "segm" in iou_type:
target[t["image_id"]]["masks"].append(gt.annToMask(t))
target[t["image_id"]]["labels"].append(t["category_id"])
target[t["image_id"]]["iscrowd"].append(t["iscrowd"])
target[t["image_id"]]["area"].append(t["area"])
preds = {}
for p in dt_dataset:
if p["image_id"] not in preds:
preds[p["image_id"]] = {"scores": [], "labels": []}
if "bbox" in iou_type:
preds[p["image_id"]]["boxes"] = []
if "segm" in iou_type:
preds[p["image_id"]]["masks"] = []
if "bbox" in iou_type:
preds[p["image_id"]]["boxes"].append(p["bbox"])
if "segm" in iou_type:
preds[p["image_id"]]["masks"].append(gt.annToMask(p))
preds[p["image_id"]]["scores"].append(p["score"])
preds[p["image_id"]]["labels"].append(p["category_id"])
for k in target: # add empty predictions for images without predictions
if k not in preds:
preds[k] = {"scores": [], "labels": []}
if "bbox" in iou_type:
preds[k]["boxes"] = []
if "segm" in iou_type:
preds[k]["masks"] = []
batched_preds, batched_target = [], []
for key in target:
bp = {
"scores": torch.tensor(preds[key]["scores"], dtype=torch.float32),
"labels": torch.tensor(preds[key]["labels"], dtype=torch.int32),
}
if "bbox" in iou_type:
bp["boxes"] = torch.tensor(np.array(preds[key]["boxes"]), dtype=torch.float32)
if "segm" in iou_type:
bp["masks"] = torch.tensor(np.array(preds[key]["masks"]), dtype=torch.uint8)
batched_preds.append(bp)
bt = {
"labels": torch.tensor(target[key]["labels"], dtype=torch.int32),
"iscrowd": torch.tensor(target[key]["iscrowd"], dtype=torch.int32),
"area": torch.tensor(target[key]["area"], dtype=torch.float32),
}
if "bbox" in iou_type:
bt["boxes"] = torch.tensor(target[key]["boxes"], dtype=torch.float32)
if "segm" in iou_type:
bt["masks"] = torch.tensor(np.array(target[key]["masks"]), dtype=torch.uint8)
batched_target.append(bt)
return batched_preds, batched_target
def tm_to_coco(self, name: str = "tm_map_input") -> None:
"""Utility function for converting the input for this metric to coco format and saving it to a json file.
This function should be used after calling `.update(...)` or `.forward(...)` on all data that should be written
to the file, as the input is then internally cached. The function then converts the cached information to coco
format and writes it to json files.
Args:
name: Name of the output file, which will be appended with "_preds.json" and "_target.json"
Example:
>>> from torch import tensor
>>> from torchmetrics.detection import MeanAveragePrecision
>>> preds = [
... dict(
... boxes=tensor([[258.0, 41.0, 606.0, 285.0]]),
... scores=tensor([0.536]),
... labels=tensor([0]),
... )
... ]
>>> target = [
... dict(
... boxes=tensor([[214.0, 41.0, 562.0, 285.0]]),
... labels=tensor([0]),
... )
... ]
>>> metric = MeanAveragePrecision()
>>> metric.update(preds, target)
>>> metric.tm_to_coco("tm_map_input") # doctest: +SKIP
"""
target_dataset = self._get_coco_format(
labels=self.groundtruth_labels,
boxes=self.groundtruth_box,
masks=self.groundtruth_mask,
crowds=self.groundtruth_crowds,
area=self.groundtruth_area,
)
preds_dataset = self._get_coco_format(
labels=self.detection_labels, boxes=self.detection_box, masks=self.detection_mask
)
preds_json = json.dumps(preds_dataset["annotations"], indent=4)
target_json = json.dumps(target_dataset, indent=4)
with open(f"{name}_preds.json", "w") as f:
f.write(preds_json)
with open(f"{name}_target.json", "w") as f:
f.write(target_json)
def _get_safe_item_values(
self, item: Dict[str, Any], warn: bool = False
) -> Tuple[Optional[Tensor], Optional[Tuple]]:
"""Convert and return the boxes or masks from the item depending on the iou_type.
Args:
item: input dictionary containing the boxes or masks
warn: whether to warn if the number of boxes or masks exceeds the max_detection_thresholds
Returns:
boxes or masks depending on the iou_type
"""
from torchvision.ops import box_convert
output = [None, None]
if "bbox" in self.iou_type:
boxes = _fix_empty_tensors(item["boxes"])
if boxes.numel() > 0:
boxes = box_convert(boxes, in_fmt=self.box_format, out_fmt="xywh")
output[0] = boxes
if "segm" in self.iou_type:
masks = []
for i in item["masks"].cpu().numpy():
rle = self.mask_utils.encode(np.asfortranarray(i))
masks.append((tuple(rle["size"]), rle["counts"]))
output[1] = tuple(masks)
if (output[0] is not None and len(output[0]) > self.max_detection_thresholds[-1]) or (
output[1] is not None and len(output[1]) > self.max_detection_thresholds[-1]
):
_warning_on_too_many_detections(self.max_detection_thresholds[-1])
return output
def _get_classes(self) -> List:
"""Return a list of unique classes found in ground truth and detection data."""
if len(self.detection_labels) > 0 or len(self.groundtruth_labels) > 0:
return torch.cat(self.detection_labels + self.groundtruth_labels).unique().cpu().tolist()
return []
def _get_coco_format(
self,
labels: List[torch.Tensor],
boxes: Optional[List[torch.Tensor]] = None,
masks: Optional[List[torch.Tensor]] = None,
scores: Optional[List[torch.Tensor]] = None,
crowds: Optional[List[torch.Tensor]] = None,
area: Optional[List[torch.Tensor]] = None,
) -> Dict:
"""Transforms and returns all cached targets or predictions in COCO format.
Format is defined at
https://cocodataset.org/#format-data
"""
images = []
annotations = []
annotation_id = 1 # has to start with 1, otherwise COCOEval results are wrong
for image_id, image_labels in enumerate(labels):
if boxes is not None:
image_boxes = boxes[image_id]
image_boxes = image_boxes.cpu().tolist()
if masks is not None:
image_masks = masks[image_id]
if len(image_masks) == 0 and boxes is None:
continue
image_labels = image_labels.cpu().tolist()
images.append({"id": image_id})
if "segm" in self.iou_type and len(image_masks) > 0:
images[-1]["height"], images[-1]["width"] = image_masks[0][0][0], image_masks[0][0][1]
for k, image_label in enumerate(image_labels):
if boxes is not None:
image_box = image_boxes[k]
if masks is not None and len(image_masks) > 0:
image_mask = image_masks[k]
image_mask = {"size": image_mask[0], "counts": image_mask[1]}
if "bbox" in self.iou_type and len(image_box) != 4:
raise ValueError(
f"Invalid input box of sample {image_id}, element {k} (expected 4 values, got {len(image_box)})"
)
if not isinstance(image_label, int):
raise ValueError(
f"Invalid input class of sample {image_id}, element {k}"
f" (expected value of type integer, got type {type(image_label)})"
)
area_stat_box = None
area_stat_mask = None
if area is not None and area[image_id][k].cpu().tolist() > 0:
area_stat = area[image_id][k].cpu().tolist()
else:
area_stat = (
self.mask_utils.area(image_mask) if "segm" in self.iou_type else image_box[2] * image_box[3]
)
if len(self.iou_type) > 1:
area_stat_box = image_box[2] * image_box[3]
area_stat_mask = self.mask_utils.area(image_mask)
annotation = {
"id": annotation_id,
"image_id": image_id,
"area": area_stat,
"category_id": image_label,
"iscrowd": crowds[image_id][k].cpu().tolist() if crowds is not None else 0,
}
if area_stat_box is not None:
annotation["area_bbox"] = area_stat_box
annotation["area_segm"] = area_stat_mask
if boxes is not None:
annotation["bbox"] = image_box
if masks is not None:
annotation["segmentation"] = image_mask
if scores is not None:
score = scores[image_id][k].cpu().tolist()
if not isinstance(score, float):
raise ValueError(
f"Invalid input score of sample {image_id}, element {k}"
f" (expected value of type float, got type {type(score)})"
)
annotation["score"] = score
annotations.append(annotation)
annotation_id += 1
classes = [{"id": i, "name": str(i)} for i in self._get_classes()]
return {"images": images, "annotations": annotations, "categories": classes}
def plot(
self, val: Optional[Union[Dict[str, Tensor], Sequence[Dict[str, Tensor]]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import tensor
>>> from torchmetrics.detection.mean_ap import MeanAveragePrecision
>>> preds = [dict(
... boxes=tensor([[258.0, 41.0, 606.0, 285.0]]),
... scores=tensor([0.536]),
... labels=tensor([0]),
... )]
>>> target = [dict(
... boxes=tensor([[214.0, 41.0, 562.0, 285.0]]),
... labels=tensor([0]),
... )]
>>> metric = MeanAveragePrecision()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.detection.mean_ap import MeanAveragePrecision
>>> preds = lambda: [dict(
... boxes=torch.tensor([[258.0, 41.0, 606.0, 285.0]]) + torch.randint(10, (1,4)),
... scores=torch.tensor([0.536]) + 0.1*torch.rand(1),
... labels=torch.tensor([0]),
... )]
>>> target = [dict(
... boxes=torch.tensor([[214.0, 41.0, 562.0, 285.0]]),
... labels=torch.tensor([0]),
... )]
>>> metric = MeanAveragePrecision()
>>> vals = []
>>> for _ in range(20):
... vals.append(metric(preds(), target))
>>> fig_, ax_ = metric.plot(vals)
"""
return self._plot(val, ax)
# --------------------
# specialized synchronization and apply functions for this metric
# --------------------
def _apply(self, fn: Callable) -> torch.nn.Module: # type: ignore[override]
"""Custom apply function.
Excludes the detections and groundtruths from the casting when the iou_type is set to `segm` as the state is
no longer a tensor but a tuple.
"""
return super()._apply(fn, exclude_state=("detection_mask", "groundtruth_mask"))
def _sync_dist(self, dist_sync_fn: Optional[Callable] = None, process_group: Optional[Any] = None) -> None:
"""Custom sync function.
For the iou_type `segm` the detections and groundtruths are no longer tensors but tuples. Therefore, we need
to gather the list of tuples and then convert it back to a list of tuples.
"""
super()._sync_dist(dist_sync_fn=dist_sync_fn, process_group=process_group)
if "segm" in self.iou_type:
self.detection_mask = self._gather_tuple_list(self.detection_mask, process_group)
self.groundtruth_mask = self._gather_tuple_list(self.groundtruth_mask, process_group)
@staticmethod
def _gather_tuple_list(list_to_gather: List[Tuple], process_group: Optional[Any] = None) -> List[Any]:
"""Gather a list of tuples over multiple devices.
Args:
list_to_gather: input list of tuples that should be gathered across devices
process_group: process group to gather the list of tuples
Returns:
list of tuples gathered across devices
"""
world_size = dist.get_world_size(group=process_group)
dist.barrier(group=process_group)
list_gathered = [None for _ in range(world_size)]
dist.all_gather_object(list_gathered, list_to_gather, group=process_group)
return [list_gathered[rank][idx] for idx in range(len(list_gathered[0])) for rank in range(world_size)]
def _warning_on_too_many_detections(limit: int) -> None:
rank_zero_warn(
f"Encountered more than {limit} detections in a single image. This means that certain detections with the"
" lowest scores will be ignored, that may have an undesirable impact on performance. Please consider adjusting"
" the `max_detection_threshold` to suit your use case. To disable this warning, set attribute class"
" `warn_on_many_detections=False`, after initializing the metric.",
UserWarning,
)
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/wmape.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.functional.regression.wmape import (
_weighted_mean_absolute_percentage_error_compute,
_weighted_mean_absolute_percentage_error_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["WeightedMeanAbsolutePercentageError.plot"]
class WeightedMeanAbsolutePercentageError(Metric):
r"""Compute weighted mean absolute percentage error (`WMAPE`_).
The output of the WMAPE metric is a non-negative floating point number, where the optimal value is 0. It is computed as:
.. math::
\text{WMAPE} = \frac{\sum_{t=1}^n | y_t - \hat{y}_t | }{\sum_{t=1}^n |y_t| }
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model
- ``target`` (:class:`~torch.Tensor`): Ground truth float tensor with shape ``(N,d)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``wmape`` (:class:`~torch.Tensor`): A tensor with the non-negative floating point wmape value
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> _ = torch.manual_seed(42)
>>> preds = torch.randn(20,)
>>> target = torch.randn(20,)
>>> wmape = WeightedMeanAbsolutePercentageError()
>>> wmape(preds, target)
tensor(1.3967)
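A minimal hand-checkable sketch with illustrative values: the absolute errors sum to 2 and the absolute
targets sum to 6, so the metric returns 2/6.
>>> # illustrative values, chosen so the result is 2/6
>>> preds = torch.tensor([1.0, 2.0, 3.0])
>>> target = torch.tensor([2.0, 2.0, 2.0])
>>> wmape = WeightedMeanAbsolutePercentageError()
>>> wmape(preds, target)
tensor(0.3333)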
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
sum_abs_error: Tensor
sum_scale: Tensor
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("sum_abs_error", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("sum_scale", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sum_abs_error, sum_scale = _weighted_mean_absolute_percentage_error_update(preds, target)
self.sum_abs_error += sum_abs_error
self.sum_scale += sum_scale
def compute(self) -> Tensor:
"""Compute weighted mean absolute percentage error over state."""
return _weighted_mean_absolute_percentage_error_compute(self.sum_abs_error, self.sum_scale)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import WeightedMeanAbsolutePercentageError
>>> metric = WeightedMeanAbsolutePercentageError()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import WeightedMeanAbsolutePercentageError
>>> metric = WeightedMeanAbsolutePercentageError()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/kl_divergence.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.regression.kl_divergence import _kld_compute, _kld_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["KLDivergence.plot"]
class KLDivergence(Metric):
r"""Compute the `KL divergence`_.
.. math::
D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q(x)}
Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
is a non-symmetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``p`` (:class:`~torch.Tensor`): a data distribution with shape ``(N, d)``
- ``q`` (:class:`~torch.Tensor`): prior or approximate distribution with shape ``(N, d)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``kl_divergence`` (:class:`~torch.Tensor`): A tensor with the KL divergence
Args:
log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
will normalize to make sure the distributions sum to 1.
reduction:
Determines how to reduce over the ``N``/batch dimension:
- ``'mean'`` [default]: Averages score across samples
- ``'sum'``: Sum score across samples
- ``'none'`` or ``None``: Returns score per sample
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
TypeError:
If ``log_prob`` is not an ``bool``.
ValueError:
If ``reduction`` is not one of ``'mean'``, ``'sum'``, ``'none'`` or ``None``.
.. note::
Half precision is only supported on GPU for this metric
Example:
>>> from torch import tensor
>>> from torchmetrics.regression import KLDivergence
>>> p = tensor([[0.36, 0.48, 0.16]])
>>> q = tensor([[1/3, 1/3, 1/3]])
>>> kl_divergence = KLDivergence()
>>> kl_divergence(p, q)
tensor(0.0853)
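A hedged sketch of the ``log_prob`` flag, reusing the distributions above as log-probabilities; the
divergence itself is unchanged.
>>> # same p and q as above, passed as log-probabilities so no normalization is applied
>>> kl_divergence = KLDivergence(log_prob=True)
>>> kl_divergence(p.log(), q.log())
tensor(0.0853)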
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
total: Tensor
# FIXME: Apply once minimal torch is 1.10. For torch<=1.9, jit does not support Union types
# measures: Union[Tensor, List[Tensor]]
def __init__(
self,
log_prob: bool = False,
reduction: Literal["mean", "sum", "none", None] = "mean",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not isinstance(log_prob, bool):
raise TypeError(f"Expected argument `log_prob` to be bool but got {log_prob}")
self.log_prob = log_prob
allowed_reduction = ["mean", "sum", "none", None]
if reduction not in allowed_reduction:
raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}")
self.reduction = reduction
if self.reduction in ["mean", "sum"]:
self.add_state("measures", torch.tensor(0.0), dist_reduce_fx="sum")
else:
self.add_state("measures", [], dist_reduce_fx="cat")
self.add_state("total", torch.tensor(0), dist_reduce_fx="sum")
def update(self, p: Tensor, q: Tensor) -> None:
"""Update metric states with predictions and targets."""
measures, total = _kld_update(p, q, self.log_prob)
if self.reduction is None or self.reduction == "none":
self.measures.append(measures)
else:
self.measures += measures.sum()
self.total += total
def compute(self) -> Tensor:
"""Compute metric."""
measures: Tensor = dim_zero_cat(self.measures) if self.reduction in ["none", None] else self.measures
return _kld_compute(measures, self.total, self.reduction)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import KLDivergence
>>> metric = KLDivergence()
>>> metric.update(randn(10,3).softmax(dim=-1), randn(10,3).softmax(dim=-1))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import KLDivergence
>>> metric = KLDivergence()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,3).softmax(dim=-1), randn(10,3).softmax(dim=-1)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/minkowski.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.regression.minkowski import _minkowski_distance_compute, _minkowski_distance_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.exceptions import TorchMetricsUserError
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MinkowskiDistance.plot"]
class MinkowskiDistance(Metric):
r"""Compute `Minkowski Distance`_.
.. math::
d_{\text{Minkowski}} = \left( \sum_{i=1}^N | y_i - \hat{y_i} |^p \right)^{\frac{1}{p}}
where
:math:`y` is a tensor of target values,
:math:`\hat{y}` is a tensor of predictions,
:math:`p` is a non-negative integer or floating-point number
This metric can be seen as a generalized version of the standard Euclidean distance, which corresponds to the
Minkowski distance with :math:`p=2`.
Args:
p: int or float larger than 1, exponent to which the difference between preds and target is to be raised
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.regression import MinkowskiDistance
>>> target = tensor([1.0, 2.8, 3.5, 4.5])
>>> preds = tensor([6.1, 2.11, 3.1, 5.6])
>>> minkowski_distance = MinkowskiDistance(3)
>>> minkowski_distance(preds, target)
tensor(5.1220)
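A minimal sketch with illustrative values: with ``p=2`` the metric reduces to the Euclidean distance,
here :math:`\sqrt{1^2 + 2^2} = \sqrt{5} \approx 2.2361`.
>>> # illustrative values for the p=2 (Euclidean) special case
>>> euclidean_distance = MinkowskiDistance(2)
>>> euclidean_distance(tensor([1.0, 1.0]), tensor([2.0, 3.0]))
tensor(2.2361)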
"""
is_differentiable: Optional[bool] = True
higher_is_better: Optional[bool] = False
full_state_update: Optional[bool] = False
plot_lower_bound: float = 0.0
minkowski_dist_sum: Tensor
def __init__(self, p: float, **kwargs: Any) -> None:
super().__init__(**kwargs)
if not (isinstance(p, (float, int)) and p >= 1):
raise TorchMetricsUserError(f"Argument ``p`` must be a float or int greater than 1, but got {p}")
self.p = p
self.add_state("minkowski_dist_sum", default=tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, targets: Tensor) -> None:
"""Update state with predictions and targets."""
minkowski_dist_sum = _minkowski_distance_update(preds, targets, self.p)
self.minkowski_dist_sum += minkowski_dist_sum
def compute(self) -> Tensor:
"""Compute metric."""
return _minkowski_distance_compute(self.minkowski_dist_sum, self.p)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import MinkowskiDistance
>>> metric = MinkowskiDistance(p=3)
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import MinkowskiDistance
>>> metric = MinkowskiDistance(p=3)
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/r2.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.regression.r2 import _r2_score_compute, _r2_score_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["R2Score.plot"]
class R2Score(Metric):
r"""Compute r2 score also known as `R2 Score_Coefficient Determination`_.
.. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}}
where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and
:math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is the total sum of squares. The adjusted r2 score can also be calculated, given by
adjusted r2 score given by
.. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1}
where the parameter :math:`k` (the number of independent regressors) should be provided as the `adjusted` argument.
The score is only properly defined when :math:`SS_{tot}\neq 0`, which may not be the case for near-constant targets.
In that case a score of 0 is returned. By definition the score is bounded between 0 and 1, where 1 corresponds to the
predictions exactly matching the targets.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model in float tensor with shape ``(N,)``
or ``(N, M)`` (multioutput)
- ``target`` (:class:`~torch.Tensor`): Ground truth values in float tensor with shape ``(N,)``
or ``(N, M)`` (multioutput)
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``r2score`` (:class:`~torch.Tensor`): A tensor with the r2 score(s)
In the case of multioutput, by default the variances will be uniformly averaged over the additional dimensions.
Please see argument ``multioutput`` for changing this behavior.
Args:
num_outputs: Number of outputs in multioutput setting
adjusted: number of independent regressors for calculating adjusted r2 score.
multioutput: Defines aggregation in the case of multiple output scores. Can be one of the following strings:
* ``'raw_values'`` returns full set of scores
* ``'uniform_average'`` scores are uniformly averaged
* ``'variance_weighted'`` scores are weighted by their individual variances
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``adjusted`` parameter is not an integer larger or equal to 0.
ValueError:
If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``.
Example:
>>> from torchmetrics.regression import R2Score
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> r2score = R2Score()
>>> r2score(preds, target)
tensor(0.9486)
>>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
>>> r2score = R2Score(num_outputs=2, multioutput='raw_values')
>>> r2score(preds, target)
tensor([0.9654, 0.9082])
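A hedged sketch of the adjusted r2 score, assuming a single independent regressor (``adjusted=1``):
>>> # same data as the first example above, adjusted for k=1 regressor
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> r2score_adjusted = R2Score(adjusted=1)
>>> r2score_adjusted(preds, target)
tensor(0.9229)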
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
sum_squared_error: Tensor
sum_error: Tensor
residual: Tensor
total: Tensor
def __init__(
self,
num_outputs: int = 1,
adjusted: int = 0,
multioutput: str = "uniform_average",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.num_outputs = num_outputs
if adjusted < 0 or not isinstance(adjusted, int):
raise ValueError("`adjusted` parameter should be an integer larger or equal to 0.")
self.adjusted = adjusted
allowed_multioutput = ("raw_values", "uniform_average", "variance_weighted")
if multioutput not in allowed_multioutput:
raise ValueError(
f"Invalid input to argument `multioutput`. Choose one of the following: {allowed_multioutput}"
)
self.multioutput = multioutput
self.add_state("sum_squared_error", default=torch.zeros(self.num_outputs), dist_reduce_fx="sum")
self.add_state("sum_error", default=torch.zeros(self.num_outputs), dist_reduce_fx="sum")
self.add_state("residual", default=torch.zeros(self.num_outputs), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sum_squared_error, sum_error, residual, total = _r2_score_update(preds, target)
self.sum_squared_error += sum_squared_error
self.sum_error += sum_error
self.residual += residual
self.total += total
def compute(self) -> Tensor:
"""Compute r2 score over the metric states."""
return _r2_score_compute(
self.sum_squared_error, self.sum_error, self.residual, self.total, self.adjusted, self.multioutput
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import R2Score
>>> metric = R2Score()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import R2Score
>>> metric = R2Score()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/explained_variance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.regression.explained_variance import (
ALLOWED_MULTIOUTPUT,
_explained_variance_compute,
_explained_variance_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["ExplainedVariance.plot"]
class ExplainedVariance(Metric):
r"""Compute `explained variance`_.
.. math:: \text{ExplainedVariance} = 1 - \frac{\text{Var}(y - \hat{y})}{\text{Var}(y)}
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model in float tensor
with shape ``(N,)`` or ``(N, ...)`` (multioutput)
- ``target`` (:class:`~torch.Tensor`): Ground truth values in long tensor
with shape ``(N,)`` or ``(N, ...)`` (multioutput)
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``explained_variance`` (:class:`~torch.Tensor`): A tensor with the explained variance(s)
In the case of multioutput, by default the variances will be uniformly averaged over the additional dimensions.
Please see argument ``multioutput`` for changing this behavior.
Args:
multioutput:
Defines aggregation in the case of multiple output scores. Can be one
of the following strings (default is ``'uniform_average'``):
* ``'raw_values'`` returns full set of scores
* ``'uniform_average'`` scores are uniformly averaged
* ``'variance_weighted'`` scores are weighted by their individual variances
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``multioutput`` is not one of ``"raw_values"``, ``"uniform_average"`` or ``"variance_weighted"``.
Example:
>>> from torch import tensor
>>> from torchmetrics.regression import ExplainedVariance
>>> target = tensor([3, -0.5, 2, 7])
>>> preds = tensor([2.5, 0.0, 2, 8])
>>> explained_variance = ExplainedVariance()
>>> explained_variance(preds, target)
tensor(0.9572)
>>> target = tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = tensor([[0, 2], [-1, 2], [8, -5]])
>>> explained_variance = ExplainedVariance(multioutput='raw_values')
>>> explained_variance(preds, target)
tensor([0.9677, 1.0000])
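A hedged sketch of ``multioutput='variance_weighted'`` with illustrative values: the first output is
predicted perfectly, the second is not, so the result lies between the two raw scores weighted by the
target variances.
>>> # illustrative values, chosen so the variance-weighted score is 0.8
>>> target = tensor([[0.0, 0.0], [2.0, 1.0], [4.0, 2.0]])
>>> preds = tensor([[0.0, 1.0], [2.0, 1.0], [4.0, 1.0]])
>>> explained_variance = ExplainedVariance(multioutput='variance_weighted')
>>> explained_variance(preds, target)
tensor(0.8000)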
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
num_obs: Tensor
sum_error: Tensor
sum_squared_error: Tensor
sum_target: Tensor
sum_squared_target: Tensor
def __init__(
self,
multioutput: Literal["raw_values", "uniform_average", "variance_weighted"] = "uniform_average",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if multioutput not in ALLOWED_MULTIOUTPUT:
raise ValueError(
f"Invalid input to argument `multioutput`. Choose one of the following: {ALLOWED_MULTIOUTPUT}"
)
self.multioutput = multioutput
self.add_state("sum_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("sum_target", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("sum_squared_target", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("num_obs", default=tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
num_obs, sum_error, sum_squared_error, sum_target, sum_squared_target = _explained_variance_update(
preds, target
)
self.num_obs = self.num_obs + num_obs
self.sum_error = self.sum_error + sum_error
self.sum_squared_error = self.sum_squared_error + sum_squared_error
self.sum_target = self.sum_target + sum_target
self.sum_squared_target = self.sum_squared_target + sum_squared_target
def compute(self) -> Union[Tensor, Sequence[Tensor]]:
"""Compute explained variance over state."""
return _explained_variance_compute(
self.num_obs,
self.sum_error,
self.sum_squared_error,
self.sum_target,
self.sum_squared_target,
self.multioutput,
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import ExplainedVariance
>>> metric = ExplainedVariance()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import ExplainedVariance
>>> metric = ExplainedVariance()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/spearman.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.regression.spearman import _spearman_corrcoef_compute, _spearman_corrcoef_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SpearmanCorrCoef.plot"]
class SpearmanCorrCoef(Metric):
r"""Compute `spearmans rank correlation coefficient`_.
    .. math::
        r_s = \frac{cov(rg_x, rg_y)}{\sigma_{rg_x} * \sigma_{rg_y}}
where :math:`rg_x` and :math:`rg_y` are the rank associated to the variables :math:`x` and :math:`y`.
    Spearman's rank correlation coefficient corresponds to the standard Pearson's correlation coefficient
    calculated on the rank variables.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model in float tensor with shape ``(N,d)``
- ``target`` (:class:`~torch.Tensor`): Ground truth values in float tensor with shape ``(N,d)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``spearman`` (:class:`~torch.Tensor`): A tensor with the spearman correlation(s)
Args:
num_outputs: Number of outputs in multioutput setting
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (single output regression):
>>> from torch import tensor
>>> from torchmetrics.regression import SpearmanCorrCoef
>>> target = tensor([3, -0.5, 2, 7])
>>> preds = tensor([2.5, 0.0, 2, 8])
>>> spearman = SpearmanCorrCoef()
>>> spearman(preds, target)
tensor(1.0000)
Example (multi output regression):
>>> from torchmetrics.regression import SpearmanCorrCoef
>>> target = tensor([[3, -0.5], [2, 7]])
>>> preds = tensor([[2.5, 0.0], [2, 8]])
>>> spearman = SpearmanCorrCoef(num_outputs=2)
>>> spearman(preds, target)
tensor([1.0000, 1.0000])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = -1.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
num_outputs: int = 1,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
        rank_zero_warn(
            "Metric `SpearmanCorrCoef` will save all targets and predictions in the buffer."
            " For large datasets, this may lead to a large memory footprint."
        )
        if not isinstance(num_outputs, int) or num_outputs < 1:
            raise ValueError(f"Expected argument `num_outputs` to be an int larger than 0, but got {num_outputs}")
self.num_outputs = num_outputs
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
preds, target = _spearman_corrcoef_update(preds, target, num_outputs=self.num_outputs)
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute Spearman's correlation coefficient."""
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
return _spearman_corrcoef_compute(preds, target)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import SpearmanCorrCoef
>>> metric = SpearmanCorrCoef()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import SpearmanCorrCoef
>>> metric = SpearmanCorrCoef()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
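# Hedged usage sketch (not part of the library): Spearman's correlation is the Pearson
# correlation computed on ranks. The naive ranking below ignores ties, which is acceptable for
# the continuous random data used here; everything in this block is an illustrative assumption.
if __name__ == "__main__":
    import torch

    preds = torch.randn(50)
    target = 0.5 * preds + torch.randn(50)

    tm_value = SpearmanCorrCoef()(preds, target)

    def _naive_rank(x: Tensor) -> Tensor:
        return x.argsort().argsort().float()  # no tie handling in this sketch

    rp = _naive_rank(preds) - _naive_rank(preds).mean()
    rt = _naive_rank(target) - _naive_rank(target).mean()
    direct = (rp * rt).sum() / (rp.norm() * rt.norm())
    print(tm_value, direct)  # both should agree up to floating point error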
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/kendall.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.regression.kendall import (
_kendall_corrcoef_compute,
_kendall_corrcoef_update,
_MetricVariant,
_TestAlternative,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["KendallRankCorrCoef.plot"]
class KendallRankCorrCoef(Metric):
r"""Compute `Kendall Rank Correlation Coefficient`_.
.. math::
tau_a = \frac{C - D}{C + D}
where :math:`C` represents concordant pairs, :math:`D` stands for discordant pairs.
.. math::
tau_b = \frac{C - D}{\sqrt{(C + D + T_{preds}) * (C + D + T_{target})}}
where :math:`C` represents concordant pairs, :math:`D` stands for discordant pairs and :math:`T` represents
a total number of ties.
.. math::
tau_c = 2 * \frac{C - D}{n^2 * \frac{m - 1}{m}}
where :math:`C` represents concordant pairs, :math:`D` stands for discordant pairs, :math:`n` is a total number
of observations and :math:`m` is a ``min`` of unique values in ``preds`` and ``target`` sequence.
    Definitions according to `The Treatment of Ties in Ranking Problems`_.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Sequence of data in float tensor of either shape ``(N,)`` or ``(N,d)``
- ``target`` (:class:`~torch.Tensor`): Sequence of data in float tensor of either shape ``(N,)`` or ``(N,d)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``kendall`` (:class:`~torch.Tensor`): A tensor with the correlation tau statistic,
and if it is not None, the p-value of corresponding statistical test.
Args:
variant: Indication of which variant of Kendall's tau to be used
t_test: Indication whether to run t-test
alternative: Alternative hypothesis for t-test. Possible values:
- 'two-sided': the rank correlation is nonzero
- 'less': the rank correlation is negative (less than zero)
- 'greater': the rank correlation is positive (greater than zero)
num_outputs: Number of outputs in multioutput setting
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError: If ``t_test`` is not of a type bool
ValueError: If ``t_test=True`` and ``alternative=None``
Example (single output regression):
>>> from torch import tensor
>>> from torchmetrics.regression import KendallRankCorrCoef
>>> preds = tensor([2.5, 0.0, 2, 8])
>>> target = tensor([3, -0.5, 2, 1])
>>> kendall = KendallRankCorrCoef()
>>> kendall(preds, target)
tensor(0.3333)
Example (multi output regression):
>>> from torchmetrics.regression import KendallRankCorrCoef
>>> preds = tensor([[2.5, 0.0], [2, 8]])
>>> target = tensor([[3, -0.5], [2, 1]])
>>> kendall = KendallRankCorrCoef(num_outputs=2)
>>> kendall(preds, target)
tensor([1., 1.])
Example (single output regression with t-test):
>>> from torchmetrics.regression import KendallRankCorrCoef
>>> preds = tensor([2.5, 0.0, 2, 8])
>>> target = tensor([3, -0.5, 2, 1])
>>> kendall = KendallRankCorrCoef(t_test=True, alternative='two-sided')
>>> kendall(preds, target)
(tensor(0.3333), tensor(0.4969))
Example (multi output regression with t-test):
>>> from torchmetrics.regression import KendallRankCorrCoef
>>> preds = tensor([[2.5, 0.0], [2, 8]])
>>> target = tensor([[3, -0.5], [2, 1]])
>>> kendall = KendallRankCorrCoef(t_test=True, alternative='two-sided', num_outputs=2)
>>> kendall(preds, target)
(tensor([1., 1.]), tensor([nan, nan]))
"""
is_differentiable = False
higher_is_better = None
full_state_update = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
variant: Literal["a", "b", "c"] = "b",
t_test: bool = False,
alternative: Optional[Literal["two-sided", "less", "greater"]] = "two-sided",
num_outputs: int = 1,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not isinstance(t_test, bool):
raise ValueError(f"Argument `t_test` is expected to be of a type `bool`, but got {type(t_test)}.")
if t_test and alternative is None:
raise ValueError("Argument `alternative` is required if `t_test=True` but got `None`.")
self.variant = _MetricVariant.from_str(str(variant))
self.alternative = _TestAlternative.from_str(str(alternative)) if t_test else None
self.num_outputs = num_outputs
self.add_state("preds", [], dist_reduce_fx="cat")
self.add_state("target", [], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update variables required to compute Kendall rank correlation coefficient."""
self.preds, self.target = _kendall_corrcoef_update(
preds,
target,
self.preds,
self.target,
num_outputs=self.num_outputs,
)
def compute(self) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Compute Kendall rank correlation coefficient, and optionally p-value of corresponding statistical test."""
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
tau, p_value = _kendall_corrcoef_compute(
preds, target, self.variant, self.alternative # type: ignore[arg-type] # todo
)
if p_value is not None:
return tau, p_value
return tau
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import KendallRankCorrCoef
>>> metric = KendallRankCorrCoef()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import KendallRankCorrCoef
>>> metric = KendallRankCorrCoef()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
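# Hedged usage sketch (not part of the library): cross-check the tau-b variant against
# ``scipy.stats.kendalltau`` when SciPy happens to be installed. SciPy is an optional,
# assumed dependency in this sketch; the random data is illustrative only.
if __name__ == "__main__":
    import torch

    preds = torch.randn(30)
    target = 0.5 * preds + torch.randn(30)

    tm_tau = KendallRankCorrCoef(variant="b")(preds, target)
    try:
        from scipy.stats import kendalltau

        scipy_tau = kendalltau(preds.numpy(), target.numpy())[0]  # tau-b is scipy's default variant
        print(tm_tau, scipy_tau)  # both should agree up to floating point error
    except ImportError:
        print(tm_tau)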
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/log_mse.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.regression.log_mse import _mean_squared_log_error_compute, _mean_squared_log_error_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MeanSquaredLogError.plot"]
class MeanSquaredLogError(Metric):
r"""Compute `mean squared logarithmic error`_ (MSLE).
.. math:: \text{MSLE} = \frac{1}{N}\sum_i^N (\log_e(1 + y_i) - \log_e(1 + \hat{y_i}))^2
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model
- ``target`` (:class:`~torch.Tensor`): Ground truth values
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``mean_squared_log_error`` (:class:`~torch.Tensor`): A tensor with the mean squared log error
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.regression import MeanSquaredLogError
>>> target = tensor([2.5, 5, 4, 8])
>>> preds = tensor([3, 5, 2.5, 7])
>>> mean_squared_log_error = MeanSquaredLogError()
>>> mean_squared_log_error(preds, target)
tensor(0.0397)
.. note::
        Half precision is only supported on GPU for this metric
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
sum_squared_log_error: Tensor
total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("sum_squared_log_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sum_squared_log_error, num_obs = _mean_squared_log_error_update(preds, target)
self.sum_squared_log_error += sum_squared_log_error
self.total += num_obs
def compute(self) -> Tensor:
"""Compute mean squared logarithmic error over state."""
return _mean_squared_log_error_compute(self.sum_squared_log_error, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import MeanSquaredLogError
>>> metric = MeanSquaredLogError()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import MeanSquaredLogError
>>> metric = MeanSquaredLogError()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
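# Hedged usage sketch (not part of the library): MSLE is the mean squared difference of
# ``log(1 + x)`` terms, so ``torch.log1p`` reproduces it directly. Non-negative random data is
# used because the logarithm requires it; all values here are illustrative assumptions.
if __name__ == "__main__":
    import torch

    preds = 10 * torch.rand(20)
    target = 10 * torch.rand(20)

    tm_value = MeanSquaredLogError()(preds, target)
    direct = torch.mean((torch.log1p(target) - torch.log1p(preds)) ** 2)
    print(tm_value, direct)  # both should agree up to floating point error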
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/rse.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.regression.r2 import _r2_score_update
from torchmetrics.functional.regression.rse import _relative_squared_error_compute
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RelativeSquaredError.plot"]
class RelativeSquaredError(Metric):
r"""Computes the relative squared error (RSE).
.. math:: \text{RSE} = \frac{\sum_i^N(y_i - \hat{y_i})^2}{\sum_i^N(y_i - \overline{y})^2}
Where :math:`y` is a tensor of target values with mean :math:`\overline{y}`, and
:math:`\hat{y}` is a tensor of predictions.
If num_outputs > 1, the returned value is averaged over all the outputs.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model in float tensor with shape ``(N,)``
or ``(N, M)`` (multioutput)
- ``target`` (:class:`~torch.Tensor`): Ground truth values in float tensor with shape ``(N,)``
or ``(N, M)`` (multioutput)
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``rse`` (:class:`~torch.Tensor`): A tensor with the RSE score(s)
Args:
num_outputs: Number of outputs in multioutput setting
squared: If True returns RSE value, if False returns RRSE value.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.regression import RelativeSquaredError
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> relative_squared_error = RelativeSquaredError()
>>> relative_squared_error(preds, target)
tensor(0.0514)
"""
is_differentiable = True
higher_is_better = False
full_state_update = False
sum_squared_error: Tensor
sum_error: Tensor
residual: Tensor
total: Tensor
def __init__(
self,
num_outputs: int = 1,
squared: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.num_outputs = num_outputs
self.add_state("sum_squared_error", default=torch.zeros(self.num_outputs), dist_reduce_fx="sum")
self.add_state("sum_error", default=torch.zeros(self.num_outputs), dist_reduce_fx="sum")
self.add_state("residual", default=torch.zeros(self.num_outputs), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
self.squared = squared
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sum_squared_error, sum_error, residual, total = _r2_score_update(preds, target)
self.sum_squared_error += sum_squared_error
self.sum_error += sum_error
self.residual += residual
self.total += total
def compute(self) -> Tensor:
"""Computes relative squared error over state."""
return _relative_squared_error_compute(
self.sum_squared_error, self.sum_error, self.residual, self.total, squared=self.squared
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import RelativeSquaredError
>>> metric = RelativeSquaredError()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import RelativeSquaredError
>>> metric = RelativeSquaredError()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
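# Hedged usage sketch (not part of the library): RSE normalises the squared error by the
# squared deviation of the targets from their mean, so it can be reproduced with two sums.
# The random data below is an illustrative assumption.
if __name__ == "__main__":
    import torch

    preds = torch.randn(50)
    target = 0.5 * preds + torch.randn(50)

    tm_value = RelativeSquaredError()(preds, target)
    direct = torch.sum((target - preds) ** 2) / torch.sum((target - target.mean()) ** 2)
    print(tm_value, direct)  # both should agree up to floating point error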
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/mse.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.regression.mse import _mean_squared_error_compute, _mean_squared_error_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MeanSquaredError.plot"]
class MeanSquaredError(Metric):
r"""Compute `mean squared error`_ (MSE).
.. math:: \text{MSE} = \frac{1}{N}\sum_i^N(y_i - \hat{y_i})^2
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model
- ``target`` (:class:`~torch.Tensor`): Ground truth values
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``mean_squared_error`` (:class:`~torch.Tensor`): A tensor with the mean squared error
Args:
squared: If True returns MSE value, if False returns RMSE value.
num_outputs: Number of outputs in multioutput setting
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
Single output mse computation:
>>> from torch import tensor
>>> from torchmetrics.regression import MeanSquaredError
>>> target = tensor([2.5, 5.0, 4.0, 8.0])
>>> preds = tensor([3.0, 5.0, 2.5, 7.0])
>>> mean_squared_error = MeanSquaredError()
>>> mean_squared_error(preds, target)
tensor(0.8750)
Example::
Multioutput mse computation:
>>> from torch import tensor
>>> from torchmetrics.regression import MeanSquaredError
>>> target = tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
>>> preds = tensor([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
>>> mean_squared_error = MeanSquaredError(num_outputs=3)
>>> mean_squared_error(preds, target)
tensor([1., 4., 9.])
"""
is_differentiable = True
higher_is_better = False
full_state_update = False
plot_lower_bound: float = 0.0
sum_squared_error: Tensor
total: Tensor
def __init__(
self,
squared: bool = True,
num_outputs: int = 1,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not isinstance(squared, bool):
raise ValueError(f"Expected argument `squared` to be a boolean but got {squared}")
self.squared = squared
if not (isinstance(num_outputs, int) and num_outputs > 0):
raise ValueError(f"Expected num_outputs to be a positive integer but got {num_outputs}")
self.num_outputs = num_outputs
self.add_state("sum_squared_error", default=torch.zeros(num_outputs), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sum_squared_error, num_obs = _mean_squared_error_update(preds, target, num_outputs=self.num_outputs)
self.sum_squared_error += sum_squared_error
self.total += num_obs
def compute(self) -> Tensor:
"""Compute mean squared error over state."""
return _mean_squared_error_compute(self.sum_squared_error, self.total, squared=self.squared)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import MeanSquaredError
>>> metric = MeanSquaredError()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import MeanSquaredError
>>> metric = MeanSquaredError()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
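# Hedged usage sketch (not part of the library): with ``squared=False`` the class returns the
# RMSE, i.e. the square root of the MSE, which the comparison below makes explicit. The random
# data is an illustrative assumption.
if __name__ == "__main__":
    import torch

    preds = torch.randn(50)
    target = preds + 0.3 * torch.randn(50)

    mse_value = MeanSquaredError()(preds, target)
    rmse_value = MeanSquaredError(squared=False)(preds, target)
    print(mse_value, rmse_value, torch.sqrt(mse_value))  # rmse_value equals sqrt(mse_value)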
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/cosine_similarity.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.regression.cosine_similarity import _cosine_similarity_compute, _cosine_similarity_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["CosineSimilarity.plot"]
class CosineSimilarity(Metric):
r"""Compute the `Cosine Similarity`_.
.. math::
cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} =
\frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}}
where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predicted float tensor with shape ``(N,d)``
- ``target`` (:class:`~torch.Tensor`): Ground truth float tensor with shape ``(N,d)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``cosine_similarity`` (:class:`~torch.Tensor`): A float tensor with the cosine similarity
Args:
reduction: how to reduce over the batch dimension using 'sum', 'mean' or 'none' (taking the individual scores)
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.regression import CosineSimilarity
>>> target = tensor([[0, 1], [1, 1]])
>>> preds = tensor([[0, 1], [0, 1]])
>>> cosine_similarity = CosineSimilarity(reduction = 'mean')
>>> cosine_similarity(preds, target)
tensor(0.8536)
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
reduction: Literal["mean", "sum", "none", None] = "sum",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
allowed_reduction = ("sum", "mean", "none", None)
if reduction not in allowed_reduction:
raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}")
self.reduction = reduction
self.add_state("preds", [], dist_reduce_fx="cat")
self.add_state("target", [], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states with predictions and targets."""
preds, target = _cosine_similarity_update(preds, target)
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute metric."""
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
return _cosine_similarity_compute(preds, target, self.reduction)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import CosineSimilarity
>>> metric = CosineSimilarity()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import CosineSimilarity
>>> metric = CosineSimilarity()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
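# Hedged usage sketch (not part of the library): with ``reduction="mean"`` the metric matches
# the average of ``torch.nn.functional.cosine_similarity`` taken row-wise over the batch.
# Shapes and data are illustrative assumptions.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    preds = torch.randn(8, 5)
    target = torch.randn(8, 5)

    tm_value = CosineSimilarity(reduction="mean")(preds, target)
    direct = F.cosine_similarity(preds, target, dim=1).mean()
    print(tm_value, direct)  # both should agree up to floating point error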
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/pearson.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["PearsonCorrCoef.plot"]
def _final_aggregation(
means_x: Tensor,
means_y: Tensor,
vars_x: Tensor,
vars_y: Tensor,
corrs_xy: Tensor,
nbs: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Aggregate the statistics from multiple devices.
Formula taken from here: `Aggregate the statistics from multiple devices`_
"""
if len(means_x) == 1:
return means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]
mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]
for i in range(1, len(means_x)):
mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]
nb = n1 + n2
mean_x = (n1 * mx1 + n2 * mx2) / nb
mean_y = (n1 * my1 + n2 * my2) / nb
# var_x
element_x1 = (n1 + 1) * mean_x - n1 * mx1
vx1 += (element_x1 - mx1) * (element_x1 - mean_x) - (element_x1 - mean_x) ** 2
element_x2 = (n2 + 1) * mean_x - n2 * mx2
vx2 += (element_x2 - mx2) * (element_x2 - mean_x) - (element_x2 - mean_x) ** 2
var_x = vx1 + vx2
# var_y
element_y1 = (n1 + 1) * mean_y - n1 * my1
vy1 += (element_y1 - my1) * (element_y1 - mean_y) - (element_y1 - mean_y) ** 2
element_y2 = (n2 + 1) * mean_y - n2 * my2
vy2 += (element_y2 - my2) * (element_y2 - mean_y) - (element_y2 - mean_y) ** 2
var_y = vy1 + vy2
# corr
cxy1 += (element_x1 - mx1) * (element_y1 - mean_y) - (element_x1 - mean_x) * (element_y1 - mean_y)
cxy2 += (element_x2 - mx2) * (element_y2 - mean_y) - (element_x2 - mean_x) * (element_y2 - mean_y)
corr_xy = cxy1 + cxy2
mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb
return mean_x, mean_y, var_x, var_y, corr_xy, nb
class PearsonCorrCoef(Metric):
r"""Compute `Pearson Correlation Coefficient`_.
.. math::
P_{corr}(x,y) = \frac{cov(x,y)}{\sigma_x \sigma_y}
Where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): either single output float tensor with shape ``(N,)``
or multioutput float tensor of shape ``(N,d)``
- ``target`` (:class:`~torch.Tensor`): either single output tensor with shape ``(N,)``
or multioutput tensor of shape ``(N,d)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``pearson`` (:class:`~torch.Tensor`): A tensor with the Pearson Correlation Coefficient
Args:
num_outputs: Number of outputs in multioutput setting
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (single output regression):
>>> from torchmetrics.regression import PearsonCorrCoef
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> pearson = PearsonCorrCoef()
>>> pearson(preds, target)
tensor(0.9849)
Example (multi output regression):
>>> from torchmetrics.regression import PearsonCorrCoef
>>> target = torch.tensor([[3, -0.5], [2, 7]])
>>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
>>> pearson = PearsonCorrCoef(num_outputs=2)
>>> pearson(preds, target)
tensor([1., 1.])
"""
is_differentiable: bool = True
higher_is_better: Optional[bool] = None # both -1 and 1 are optimal
full_state_update: bool = True
plot_lower_bound: float = -1.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
mean_x: Tensor
mean_y: Tensor
var_x: Tensor
var_y: Tensor
corr_xy: Tensor
n_total: Tensor
def __init__(
self,
num_outputs: int = 1,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
        if not isinstance(num_outputs, int) or num_outputs < 1:
            raise ValueError(f"Expected argument `num_outputs` to be an int larger than 0, but got {num_outputs}")
self.num_outputs = num_outputs
self.add_state("mean_x", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
self.add_state("mean_y", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
self.add_state("var_x", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
self.add_state("var_y", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
self.add_state("corr_xy", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
self.add_state("n_total", default=torch.zeros(self.num_outputs), dist_reduce_fx=None)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update(
preds,
target,
self.mean_x,
self.mean_y,
self.var_x,
self.var_y,
self.corr_xy,
self.n_total,
self.num_outputs,
)
def compute(self) -> Tensor:
"""Compute pearson correlation coefficient over state."""
if (self.num_outputs == 1 and self.mean_x.numel() > 1) or (self.num_outputs > 1 and self.mean_x.ndim > 1):
# multiple devices, need further reduction
_, _, var_x, var_y, corr_xy, n_total = _final_aggregation(
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
else:
var_x = self.var_x
var_y = self.var_y
corr_xy = self.corr_xy
n_total = self.n_total
return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import PearsonCorrCoef
>>> metric = PearsonCorrCoef()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import PearsonCorrCoef
>>> metric = PearsonCorrCoef()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
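# Hedged usage sketch (not part of the library): the running mean/variance/covariance states
# let the metric be updated batch by batch, and the result matches the closed-form Pearson
# correlation on the concatenated data. Batch sizes and data are illustrative assumptions.
if __name__ == "__main__":
    preds = torch.randn(100)
    target = 0.5 * preds + torch.randn(100)

    pearson = PearsonCorrCoef()
    for p, t in zip(preds.chunk(5), target.chunk(5)):
        pearson.update(p, t)  # statistics are merged incrementally
    streamed = pearson.compute()

    xc, yc = preds - preds.mean(), target - target.mean()
    direct = (xc * yc).sum() / (xc.norm() * yc.norm())
    print(streamed, direct)  # both should agree up to floating point error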
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/symmetric_mape.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.regression.symmetric_mape import (
_symmetric_mean_absolute_percentage_error_compute,
_symmetric_mean_absolute_percentage_error_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SymmetricMeanAbsolutePercentageError.plot"]
class SymmetricMeanAbsolutePercentageError(Metric):
r"""Compute symmetric mean absolute percentage error (`SMAPE`_).
.. math:: \text{SMAPE} = \frac{2}{n}\sum_1^n\frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)}
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model
- ``target`` (:class:`~torch.Tensor`): Ground truth values
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``smape`` (:class:`~torch.Tensor`): A tensor with non-negative floating point smape value between 0 and 1
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.regression import SymmetricMeanAbsolutePercentageError
>>> target = tensor([1, 10, 1e6])
>>> preds = tensor([0.9, 15, 1.2e6])
>>> smape = SymmetricMeanAbsolutePercentageError()
>>> smape(preds, target)
tensor(0.2290)
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
sum_abs_per_error: Tensor
total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)
self.sum_abs_per_error += sum_abs_per_error
self.total += num_obs
def compute(self) -> Tensor:
"""Compute mean absolute percentage error over state."""
return _symmetric_mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import SymmetricMeanAbsolutePercentageError
>>> metric = SymmetricMeanAbsolutePercentageError()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import SymmetricMeanAbsolutePercentageError
>>> metric = SymmetricMeanAbsolutePercentageError()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
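# Hedged usage sketch (not part of the library): SMAPE can be reproduced directly from the
# formula in the docstring. The epsilon guarding the denominator is an internal detail, so the
# clamp value below is only an assumption and the strictly positive data is illustrative.
if __name__ == "__main__":
    import torch

    preds = torch.rand(20) + 0.5
    target = torch.rand(20) + 0.5

    tm_value = SymmetricMeanAbsolutePercentageError()(preds, target)
    direct = torch.mean(2 * (preds - target).abs() / (preds.abs() + target.abs()).clamp(min=1e-6))
    print(tm_value, direct)  # values should agree closely for data away from zero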
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/tweedie_deviance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.functional.regression.tweedie_deviance import (
_tweedie_deviance_score_compute,
_tweedie_deviance_score_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["TweedieDevianceScore.plot"]
class TweedieDevianceScore(Metric):
r"""Compute the `Tweedie Deviance Score`_.
.. math::
deviance\_score(\hat{y},y) =
\begin{cases}
(\hat{y} - y)^2, & \text{for }p=0\\
2 * (y * log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }p=1\\
2 * (log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }p=2\\
2 * (\frac{(max(y,0))^{2 - p}}{(1 - p)(2 - p)} - \frac{y(\hat{y})^{1 - p}}{1 - p} + \frac{(
\hat{y})^{2 - p}}{2 - p}), & \text{otherwise}
\end{cases}
where :math:`y` is a tensor of targets values, :math:`\hat{y}` is a tensor of predictions, and
:math:`p` is the `power`.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predicted float tensor with shape ``(N,...)``
- ``target`` (:class:`~torch.Tensor`): Ground truth float tensor with shape ``(N,...)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``deviance_score`` (:class:`~torch.Tensor`): A tensor with the deviance score
Args:
power:
- power < 0 : Extreme stable distribution. (Requires: preds > 0.)
- power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.)
            - power = 1 : Poisson distribution. (Requires: targets >= 0 and preds > 0.)
- 1 < p < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.)
- power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.)
- power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.)
- otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.)
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.regression import TweedieDevianceScore
>>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])
>>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])
>>> deviance_score = TweedieDevianceScore(power=2)
>>> deviance_score(preds, targets)
tensor(1.2083)
"""
is_differentiable: bool = True
higher_is_better = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
sum_deviance_score: Tensor
num_observations: Tensor
def __init__(
self,
power: float = 0.0,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if 0 < power < 1:
raise ValueError(f"Deviance Score is not defined for power={power}.")
self.power: float = power
self.add_state("sum_deviance_score", torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("num_observations", torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, targets: Tensor) -> None:
"""Update metric states with predictions and targets."""
sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, self.power)
self.sum_deviance_score += sum_deviance_score
self.num_observations += num_observations
def compute(self) -> Tensor:
"""Compute metric."""
return _tweedie_deviance_score_compute(self.sum_deviance_score, self.num_observations)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import TweedieDevianceScore
>>> metric = TweedieDevianceScore()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import TweedieDevianceScore
>>> metric = TweedieDevianceScore()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
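# Hedged usage sketch (not part of the library): for ``power=2`` (Gamma) the deviance reduces
# to the closed form 2 * (log(preds / target) + target / preds - 1) averaged over the
# observations, as in the docstring above. Strictly positive random data is assumed.
if __name__ == "__main__":
    preds = torch.rand(20) + 0.5
    target = torch.rand(20) + 0.5

    tm_value = TweedieDevianceScore(power=2)(preds, target)
    direct = torch.mean(2 * (torch.log(preds / target) + target / preds - 1))
    print(tm_value, direct)  # both should agree up to floating point error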
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/log_cosh.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.functional.regression.log_cosh import _log_cosh_error_compute, _log_cosh_error_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["LogCoshError.plot"]
class LogCoshError(Metric):
r"""Compute the `LogCosh Error`_.
    .. math:: \text{LogCoshError} = \log\left(\frac{\exp(\hat{y} - y) + \exp(y - \hat{y})}{2}\right)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Estimated labels with shape ``(batch_size,)``
or ``(batch_size, num_outputs)``
- ``target`` (:class:`~torch.Tensor`): Ground truth labels with shape ``(batch_size,)``
or ``(batch_size, num_outputs)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``log_cosh_error`` (:class:`~torch.Tensor`): A tensor with the log cosh error
Args:
num_outputs: Number of outputs in multioutput setting
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (single output regression)::
>>> from torchmetrics.regression import LogCoshError
>>> preds = torch.tensor([3.0, 5.0, 2.5, 7.0])
>>> target = torch.tensor([2.5, 5.0, 4.0, 8.0])
>>> log_cosh_error = LogCoshError()
>>> log_cosh_error(preds, target)
tensor(0.3523)
Example (multi output regression)::
>>> from torchmetrics.regression import LogCoshError
>>> preds = torch.tensor([[3.0, 5.0, 1.2], [-2.1, 2.5, 7.0]])
>>> target = torch.tensor([[2.5, 5.0, 1.3], [0.3, 4.0, 8.0]])
>>> log_cosh_error = LogCoshError(num_outputs=3)
>>> log_cosh_error(preds, target)
tensor([0.9176, 0.4277, 0.2194])
"""
is_differentiable = True
higher_is_better = False
full_state_update = False
plot_lower_bound: float = 0.0
sum_log_cosh_error: Tensor
total: Tensor
def __init__(self, num_outputs: int = 1, **kwargs: Any) -> None:
super().__init__(**kwargs)
        if not isinstance(num_outputs, int) or num_outputs < 1:
            raise ValueError(f"Expected argument `num_outputs` to be an int larger than 0, but got {num_outputs}")
self.num_outputs = num_outputs
self.add_state("sum_log_cosh_error", default=torch.zeros(num_outputs), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets.
Raises:
ValueError:
If ``preds`` or ``target`` has multiple outputs when ``num_outputs=1``
"""
sum_log_cosh_error, num_obs = _log_cosh_error_update(preds, target, self.num_outputs)
self.sum_log_cosh_error += sum_log_cosh_error
self.total += num_obs
def compute(self) -> Tensor:
"""Compute LogCosh error over state."""
return _log_cosh_error_compute(self.sum_log_cosh_error, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import LogCoshError
>>> metric = LogCoshError()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import LogCoshError
>>> metric = LogCoshError()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
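# Hedged usage sketch (not part of the library): the metric equals the mean of
# ``log(cosh(preds - target))``, which the direct computation below reproduces (the library may
# use a numerically stabler formulation internally). Data and shapes are illustrative.
if __name__ == "__main__":
    preds = torch.randn(50)
    target = preds + 0.3 * torch.randn(50)

    tm_value = LogCoshError()(preds, target)
    direct = torch.mean(torch.log(torch.cosh(preds - target)))
    print(tm_value, direct)  # both should agree up to floating point error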
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/concordance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.regression.concordance import _concordance_corrcoef_compute
from torchmetrics.regression.pearson import PearsonCorrCoef, _final_aggregation
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["ConcordanceCorrCoef.plot"]
class ConcordanceCorrCoef(PearsonCorrCoef):
r"""Compute concordance correlation coefficient that measures the agreement between two variables.
.. math::
\rho_c = \frac{2 \rho \sigma_x \sigma_y}{\sigma_x^2 + \sigma_y^2 + (\mu_x - \mu_y)^2}
    where :math:`\mu_x, \mu_y` are the means of the two variables, :math:`\sigma_x^2, \sigma_y^2` are the
    corresponding variances and :math:`\rho` is the Pearson correlation coefficient between the two variables.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): either single output float tensor with shape ``(N,)`` or multioutput
float tensor of shape ``(N,d)``
- ``target`` (:class:`~torch.Tensor`): either single output float tensor with shape ``(N,)`` or multioutput
float tensor of shape ``(N,d)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``concordance`` (:class:`~torch.Tensor`): A scalar float tensor with the concordance coefficient(s) for
non-multioutput input or a float tensor with shape ``(d,)`` for multioutput input
Args:
num_outputs: Number of outputs in multioutput setting
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (single output regression):
>>> from torchmetrics.regression import ConcordanceCorrCoef
>>> from torch import tensor
>>> target = tensor([3, -0.5, 2, 7])
>>> preds = tensor([2.5, 0.0, 2, 8])
>>> concordance = ConcordanceCorrCoef()
>>> concordance(preds, target)
tensor(0.9777)
Example (multi output regression):
>>> from torchmetrics.regression import ConcordanceCorrCoef
>>> target = tensor([[3, -0.5], [2, 7]])
>>> preds = tensor([[2.5, 0.0], [2, 8]])
>>> concordance = ConcordanceCorrCoef(num_outputs=2)
>>> concordance(preds, target)
tensor([0.7273, 0.9887])
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = True
plot_lower_bound: float = -1.0
plot_upper_bound: float = 1.0
def compute(self) -> Tensor:
"""Compute final concordance correlation coefficient over metric states."""
if (self.num_outputs == 1 and self.mean_x.numel() > 1) or (self.num_outputs > 1 and self.mean_x.ndim > 1):
mean_x, mean_y, var_x, var_y, corr_xy, n_total = _final_aggregation(
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
else:
mean_x = self.mean_x
mean_y = self.mean_y
var_x = self.var_x
var_y = self.var_y
corr_xy = self.corr_xy
n_total = self.n_total
return _concordance_corrcoef_compute(mean_x, mean_y, var_x, var_y, corr_xy, n_total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import ConcordanceCorrCoef
>>> metric = ConcordanceCorrCoef()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import ConcordanceCorrCoef
>>> metric = ConcordanceCorrCoef()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
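# Hedged usage sketch (not part of the library): the concordance correlation coefficient can be
# reproduced from the means, variances and covariance of the two variables, as in the formula
# in the docstring. The sample (n - 1) variance convention and the random data are assumptions.
if __name__ == "__main__":
    import torch

    preds = torch.randn(100)
    target = 0.5 * preds + torch.randn(100)

    tm_value = ConcordanceCorrCoef()(preds, target)

    mx, my = preds.mean(), target.mean()
    vx, vy = preds.var(), target.var()  # unbiased sample variance, assumed convention
    cov = ((preds - mx) * (target - my)).sum() / (preds.numel() - 1)
    direct = 2 * cov / (vx + vy + (mx - my) ** 2)
    print(tm_value, direct)  # both should agree up to floating point error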
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/regression/mae.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.regression.mae import _mean_absolute_error_compute, _mean_absolute_error_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MeanAbsoluteError.plot"]
class MeanAbsoluteError(Metric):
r"""`Compute Mean Absolute Error`_ (MAE).
.. math:: \text{MAE} = \frac{1}{N}\sum_i^N | y_i - \hat{y_i} |
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model
- ``target`` (:class:`~torch.Tensor`): Ground truth values
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``mean_absolute_error`` (:class:`~torch.Tensor`): A tensor with the mean absolute error over the state
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.regression import MeanAbsoluteError
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> mean_absolute_error = MeanAbsoluteError()
>>> mean_absolute_error(preds, target)
tensor(0.5000)
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
sum_abs_error: Tensor
total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("sum_abs_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sum_abs_error, num_obs = _mean_absolute_error_update(preds, target)
self.sum_abs_error += sum_abs_error
self.total += num_obs
def compute(self) -> Tensor:
"""Compute mean absolute error over state."""
return _mean_absolute_error_compute(self.sum_abs_error, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import MeanAbsoluteError
>>> metric = MeanAbsoluteError()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import MeanAbsoluteError
>>> metric = MeanAbsoluteError()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
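# A minimal usage sketch (not part of the upstream module), assuming only the class
# defined above and torch: each ``update`` call adds to the ``sum_abs_error`` and
# ``total`` states declared above, and ``compute`` performs the final division.
if __name__ == "__main__":
    import torch

    metric = MeanAbsoluteError()
    for _ in range(3):  # three simulated batches of 8 predictions each
        metric.update(torch.randn(8), torch.randn(8))
    print(metric.compute())  # MAE over all 24 accumulated samples
    metric.reset()  # clears the accumulated states for the next epoch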
# File: public_repos/torchmetrics/src/torchmetrics/regression/mape.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.regression.mape import (
_mean_absolute_percentage_error_compute,
_mean_absolute_percentage_error_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MeanAbsolutePercentageError.plot"]
class MeanAbsolutePercentageError(Metric):
r"""Compute `Mean Absolute Percentage Error`_ (MAPE).
.. math:: \text{MAPE} = \frac{1}{n}\sum_{i=1}^n\frac{| y_i - \hat{y_i} |}{\max(\epsilon, | y_i |)}
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model
- ``target`` (:class:`~torch.Tensor`): Ground truth values
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``mean_abs_percentage_error`` (:class:`~torch.Tensor`): A tensor with the mean absolute percentage error over
state
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Note:
MAPE output is a non-negative floating point, and the best result is ``0.0``. Note that bad predictions can lead
to arbitrarily large values, especially when some ``target`` values are close to 0. In that case this
`MAPE implementation returns`_ a very large number instead of ``inf``.
Example:
>>> from torch import tensor
>>> from torchmetrics.regression import MeanAbsolutePercentageError
>>> target = tensor([1, 10, 1e6])
>>> preds = tensor([0.9, 15, 1.2e6])
>>> mean_abs_percentage_error = MeanAbsolutePercentageError()
>>> mean_abs_percentage_error(preds, target)
tensor(0.2667)
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
sum_abs_per_error: Tensor
total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target)
self.sum_abs_per_error += sum_abs_per_error
self.total += num_obs
def compute(self) -> Tensor:
"""Compute mean absolute percentage error over state."""
return _mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import MeanAbsolutePercentageError
>>> metric = MeanAbsolutePercentageError()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import MeanAbsolutePercentageError
>>> metric = MeanAbsolutePercentageError()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
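# A minimal usage sketch (not part of the upstream module), assuming only the class
# defined above and torch, illustrating the note in the docstring: targets close to
# zero blow the score up, while well-scaled targets give an interpretable ratio.
if __name__ == "__main__":
    import torch

    well_scaled = MeanAbsolutePercentageError()
    print(well_scaled(torch.tensor([11.0, 19.0]), torch.tensor([10.0, 20.0])))  # ~0.075
    near_zero = MeanAbsolutePercentageError()
    print(near_zero(torch.tensor([0.5, 19.0]), torch.tensor([1e-9, 20.0])))  # very large value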
# File: public_repos/torchmetrics/src/torchmetrics/regression/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.regression.concordance import ConcordanceCorrCoef
from torchmetrics.regression.cosine_similarity import CosineSimilarity
from torchmetrics.regression.explained_variance import ExplainedVariance
from torchmetrics.regression.kendall import KendallRankCorrCoef
from torchmetrics.regression.kl_divergence import KLDivergence
from torchmetrics.regression.log_cosh import LogCoshError
from torchmetrics.regression.log_mse import MeanSquaredLogError
from torchmetrics.regression.mae import MeanAbsoluteError
from torchmetrics.regression.mape import MeanAbsolutePercentageError
from torchmetrics.regression.minkowski import MinkowskiDistance
from torchmetrics.regression.mse import MeanSquaredError
from torchmetrics.regression.pearson import PearsonCorrCoef
from torchmetrics.regression.r2 import R2Score
from torchmetrics.regression.rse import RelativeSquaredError
from torchmetrics.regression.spearman import SpearmanCorrCoef
from torchmetrics.regression.symmetric_mape import SymmetricMeanAbsolutePercentageError
from torchmetrics.regression.tweedie_deviance import TweedieDevianceScore
from torchmetrics.regression.wmape import WeightedMeanAbsolutePercentageError
__all__ = [
"ConcordanceCorrCoef",
"CosineSimilarity",
"ExplainedVariance",
"KendallRankCorrCoef",
"KLDivergence",
"LogCoshError",
"MeanSquaredLogError",
"MeanAbsoluteError",
"MeanAbsolutePercentageError",
"MinkowskiDistance",
"MeanSquaredError",
"PearsonCorrCoef",
"R2Score",
"RelativeSquaredError",
"SpearmanCorrCoef",
"SymmetricMeanAbsolutePercentageError",
"TweedieDevianceScore",
"WeightedMeanAbsolutePercentageError",
]
# File: public_repos/torchmetrics/src/torchmetrics/nominal/fleiss_kappa.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.nominal.fleiss_kappa import _fleiss_kappa_compute, _fleiss_kappa_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["FleissKappa.plot"]
class FleissKappa(Metric):
r"""Calculatees `Fleiss kappa`_ a statistical measure for inter agreement between raters.
.. math::
\kappa = \frac{\bar{p} - \bar{p_e}}{1 - \bar{p_e}}
where :math:`\bar{p}` is the mean of the agreement probability over all raters and :math:`\bar{p_e}` is the mean
agreement probability over all raters if the ratings had been assigned randomly. If the raters are in complete
agreement, the score 1 is returned; if there is no agreement among the raters (other than what would be expected
by chance), a score smaller than 0 is returned.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``ratings`` (:class:`~torch.Tensor`): Ratings of shape ``[n_samples, n_categories]`` or
``[n_samples, n_categories, n_raters]`` dependent on ``mode``. If ``mode`` is ``counts``, ``ratings`` must be
integer and contain the number of raters that chose each category. If ``mode`` is ``probs``, ``ratings`` must be
floating point and contain the probability/logits that each rater chose each category.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``fleiss_k`` (:class:`~torch.Tensor`): A float scalar tensor with the calculated Fleiss' kappa score.
Args:
mode: Whether `ratings` will be provided as counts or probabilities.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> # Ratings are provided as counts
>>> import torch
>>> from torchmetrics.nominal import FleissKappa
>>> _ = torch.manual_seed(42)
>>> ratings = torch.randint(0, 10, size=(100, 5)).long() # 100 samples, 5 categories, 10 raters
>>> metric = FleissKappa(mode='counts')
>>> metric(ratings)
tensor(0.0089)
Example:
>>> # Ratings are provided as probabilities
>>> import torch
>>> from torchmetrics.nominal import FleissKappa
>>> _ = torch.manual_seed(42)
>>> ratings = torch.randn(100, 5, 10).softmax(dim=1) # 100 samples, 5 categories, 10 raters
>>> metric = FleissKappa(mode='probs')
>>> metric(ratings)
tensor(-0.0105)
"""
full_state_update: bool = False
is_differentiable: bool = False
higher_is_better: bool = True
plot_upper_bound: float = 1.0
counts: List[Tensor]
def __init__(self, mode: Literal["counts", "probs"] = "counts", **kwargs: Any) -> None:
super().__init__(**kwargs)
if mode not in ["counts", "probs"]:
raise ValueError("Argument ``mode`` must be one of 'counts' or 'probs'.")
self.mode = mode
self.add_state("counts", default=[], dist_reduce_fx="cat")
def update(self, ratings: Tensor) -> None:
"""Updates the counts for fleiss kappa metric."""
counts = _fleiss_kappa_update(ratings, self.mode)
self.counts.append(counts)
def compute(self) -> Tensor:
"""Computes Fleiss' kappa."""
counts = dim_zero_cat(self.counts)
return _fleiss_kappa_compute(counts)
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.nominal import FleissKappa
>>> metric = FleissKappa(mode="probs")
>>> metric.update(torch.randn(100, 5, 10).softmax(dim=1))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.nominal import FleissKappa
>>> metric = FleissKappa(mode="probs")
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randn(100, 5, 10).softmax(dim=1)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
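# A minimal usage sketch (not part of the upstream module), assuming only the class
# defined above and torch: raw categorical picks from several raters are turned into
# the per-sample count matrix expected by ``mode='counts'``.
if __name__ == "__main__":
    import torch

    raw = torch.randint(0, 5, (100, 10))  # 100 items, 10 raters, 5 categories
    counts = torch.nn.functional.one_hot(raw, num_classes=5).sum(dim=1)  # [100, 5] counts per item
    metric = FleissKappa(mode="counts")
    print(metric(counts))  # close to 0 for completely random ratings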
# File: public_repos/torchmetrics/src/torchmetrics/nominal/tschuprows.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.nominal.tschuprows import _tschuprows_t_compute, _tschuprows_t_update
from torchmetrics.functional.nominal.utils import _nominal_input_validation
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["TschuprowsT.plot"]
class TschuprowsT(Metric):
r"""Compute `Tschuprow's T`_ statistic measuring the association between two categorical (nominal) data series.
.. math::
T = \sqrt{\frac{\chi^2 / n}{\sqrt{(r - 1) * (k - 1)}}}
where
.. math::
\chi^2 = \sum_{i,j} \frac{\left(n_{ij} - \frac{n_{i.} n_{.j}}{n}\right)^2}{\frac{n_{i.} n_{.j}}{n}}
where :math:`n_{ij}` denotes the number of times the pair of values :math:`(A_i, B_j)` is observed, with
:math:`A_i` and :math:`B_j` denoting the values taken by ``preds`` and ``target``, respectively. Tschuprow's T is a
symmetric coefficient, i.e. :math:`T(preds, target) = T(target, preds)`, so the order of the input arguments does
not matter. The output values lie in [0, 1], with 1 meaning perfect association.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Either 1D or 2D tensor of categorical (nominal) data from the first data
series with shape ``(batch_size,)`` or ``(batch_size, num_classes)``, respectively.
- ``target`` (:class:`~torch.Tensor`): Either 1D or 2D tensor of categorical (nominal) data from the second data
series with shape ``(batch_size,)`` or ``(batch_size, num_classes)``, respectively.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``tschuprows_t`` (:class:`~torch.Tensor`): Scalar tensor containing the Tschuprow's T statistic.
Args:
num_classes: Integer specifying the number of classes
bias_correction: Indication of whether to use bias correction.
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If `nan_strategy` is not one of `'replace'` and `'drop'`
ValueError:
If `nan_strategy` is equal to `'replace'` and `nan_replace_value` is not an `int` or `float`
Example::
>>> from torchmetrics.nominal import TschuprowsT
>>> _ = torch.manual_seed(42)
>>> preds = torch.randint(0, 4, (100,))
>>> target = torch.round(preds + torch.randn(100)).clamp(0, 4)
>>> tschuprows_t = TschuprowsT(num_classes=5)
>>> tschuprows_t(preds, target)
tensor(0.4930)
"""
full_state_update: bool = False
is_differentiable: bool = False
higher_is_better: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
confmat: Tensor
def __init__(
self,
num_classes: int,
bias_correction: bool = True,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.num_classes = num_classes
self.bias_correction = bias_correction
_nominal_input_validation(nan_strategy, nan_replace_value)
self.nan_strategy = nan_strategy
self.nan_replace_value = nan_replace_value
self.add_state("confmat", torch.zeros(num_classes, num_classes), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
confmat = _tschuprows_t_update(preds, target, self.num_classes, self.nan_strategy, self.nan_replace_value)
self.confmat += confmat
def compute(self) -> Tensor:
"""Compute Tschuprow's T statistic."""
return _tschuprows_t_compute(self.confmat, self.bias_correction)
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.nominal import TschuprowsT
>>> metric = TschuprowsT(num_classes=5)
>>> metric.update(torch.randint(0, 4, (100,)), torch.randint(0, 4, (100,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.nominal import TschuprowsT
>>> metric = TschuprowsT(num_classes=5)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (100,)), torch.randint(0, 4, (100,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
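# A minimal usage sketch (not part of the upstream module), assuming only the class
# defined above and torch: a perfectly associated pair of nominal series is contrasted
# with an independent one.
if __name__ == "__main__":
    import torch

    preds = torch.randint(0, 5, (1000,))
    metric = TschuprowsT(num_classes=5)
    print(metric(preds, preds.clone()))  # identical series -> close to 1
    metric.reset()
    print(metric(preds, torch.randint(0, 5, (1000,))))  # independent series -> close to 0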
# File: public_repos/torchmetrics/src/torchmetrics/nominal/pearson.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.nominal.pearson import (
_pearsons_contingency_coefficient_compute,
_pearsons_contingency_coefficient_update,
)
from torchmetrics.functional.nominal.utils import _nominal_input_validation
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["PearsonsContingencyCoefficient.plot"]
class PearsonsContingencyCoefficient(Metric):
r"""Compute `Pearson's Contingency Coefficient`_ statistic.
This metric measures the association between two categorical (nominal) data series.
.. math::
Pearson = \sqrt{\frac{\chi^2 / n}{1 + \chi^2 / n}}
where
.. math::
\chi^2 = \sum_{i,j} \frac{\left(n_{ij} - \frac{n_{i.} n_{.j}}{n}\right)^2}{\frac{n_{i.} n_{.j}}{n}}
where :math:`n_{ij}` denotes the number of times the pair of values :math:`(A_i, B_j)` is observed, with
:math:`A_i` and :math:`B_j` denoting the values taken by ``preds`` and ``target``, respectively. Pearson's
Contingency Coefficient is a symmetric coefficient, i.e. :math:`Pearson(preds, target) = Pearson(target, preds)`,
so the order of the input arguments does not matter. The output values lie in [0, 1], with 1 meaning perfect
association.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Either 1D or 2D tensor of categorical (nominal) data from the first data
series with shape ``(batch_size,)`` or ``(batch_size, num_classes)``, respectively.
- ``target`` (:class:`~torch.Tensor`): Either 1D or 2D tensor of categorical (nominal) data from the second data
series with shape ``(batch_size,)`` or ``(batch_size, num_classes)``, respectively.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``pearsons_cc`` (:class:`~torch.Tensor`): Scalar tensor containing the Pearson's Contingency Coefficient statistic.
Args:
num_classes: Integer specifying the number of classes
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If `nan_strategy` is not one of `'replace'` and `'drop'`
ValueError:
If `nan_strategy` is equal to `'replace'` and `nan_replace_value` is not an `int` or `float`
Example::
>>> from torchmetrics.nominal import PearsonsContingencyCoefficient
>>> _ = torch.manual_seed(42)
>>> preds = torch.randint(0, 4, (100,))
>>> target = torch.round(preds + torch.randn(100)).clamp(0, 4)
>>> pearsons_contingency_coefficient = PearsonsContingencyCoefficient(num_classes=5)
>>> pearsons_contingency_coefficient(preds, target)
tensor(0.6948)
"""
full_state_update: bool = False
is_differentiable: bool = False
higher_is_better: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
confmat: Tensor
def __init__(
self,
num_classes: int,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.num_classes = num_classes
_nominal_input_validation(nan_strategy, nan_replace_value)
self.nan_strategy = nan_strategy
self.nan_replace_value = nan_replace_value
self.add_state("confmat", torch.zeros(num_classes, num_classes), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
confmat = _pearsons_contingency_coefficient_update(
preds, target, self.num_classes, self.nan_strategy, self.nan_replace_value
)
self.confmat += confmat
def compute(self) -> Tensor:
"""Compute Pearson's Contingency Coefficient statistic."""
return _pearsons_contingency_coefficient_compute(self.confmat)
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.nominal import PearsonsContingencyCoefficient
>>> metric = PearsonsContingencyCoefficient(num_classes=5)
>>> metric.update(torch.randint(0, 4, (100,)), torch.randint(0, 4, (100,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.nominal import PearsonsContingencyCoefficient
>>> metric = PearsonsContingencyCoefficient(num_classes=5)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (100,)), torch.randint(0, 4, (100,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
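# A minimal usage sketch (not part of the upstream module), assuming only the class
# defined above and torch: the contingency table is accumulated over batches via
# ``update`` and the coefficient is read once from ``compute``.
if __name__ == "__main__":
    import torch

    metric = PearsonsContingencyCoefficient(num_classes=4)
    for _ in range(5):  # five batches sharing the same confusion-matrix state
        preds = torch.randint(0, 4, (50,))
        target = (preds + torch.randint(0, 2, (50,))).clamp(max=3)  # correlated targets
        metric.update(preds, target)
    print(metric.compute())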
# File: public_repos/torchmetrics/src/torchmetrics/nominal/cramers.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.nominal.cramers import _cramers_v_compute, _cramers_v_update
from torchmetrics.functional.nominal.utils import _nominal_input_validation
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["CramersV.plot"]
class CramersV(Metric):
r"""Compute `Cramer's V`_ statistic measuring the association between two categorical (nominal) data series.
.. math::
V = \sqrt{\frac{\chi^2 / n}{\min(r - 1, k - 1)}}
where
.. math::
\chi^2 = \sum_{i,j} \frac{\left(n_{ij} - \frac{n_{i.} n_{.j}}{n}\right)^2}{\frac{n_{i.} n_{.j}}{n}}
where :math:`n_{ij}` denotes the number of times the pair of values :math:`(A_i, B_j)` is observed, with
:math:`A_i` and :math:`B_j` denoting the values taken by ``preds`` and ``target``, respectively. Cramer's V is a
symmetric coefficient, i.e. :math:`V(preds, target) = V(target, preds)`, so the order of the input arguments does
not matter. The output values lie in [0, 1], with 1 meaning perfect association.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Either 1D or 2D tensor of categorical (nominal) data from the first data
series with shape ``(batch_size,)`` or ``(batch_size, num_classes)``, respectively.
- ``target`` (:class:`~torch.Tensor`): Either 1D or 2D tensor of categorical (nominal) data from the second data
series with shape ``(batch_size,)`` or ``(batch_size, num_classes)``, respectively.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``cramers_v`` (:class:`~torch.Tensor`): Scalar tensor containing the Cramer's V statistic.
Args:
num_classes: Integer specifying the number of classes
bias_correction: Indication of whether to use bias correction.
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If `nan_strategy` is not one of `'replace'` and `'drop'`
ValueError:
If `nan_strategy` is equal to `'replace'` and `nan_replace_value` is not an `int` or `float`
Example::
>>> from torchmetrics.nominal import CramersV
>>> _ = torch.manual_seed(42)
>>> preds = torch.randint(0, 4, (100,))
>>> target = torch.round(preds + torch.randn(100)).clamp(0, 4)
>>> cramers_v = CramersV(num_classes=5)
>>> cramers_v(preds, target)
tensor(0.5284)
"""
full_state_update: bool = False
is_differentiable: bool = False
higher_is_better: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
confmat: Tensor
def __init__(
self,
num_classes: int,
bias_correction: bool = True,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.num_classes = num_classes
self.bias_correction = bias_correction
_nominal_input_validation(nan_strategy, nan_replace_value)
self.nan_strategy = nan_strategy
self.nan_replace_value = nan_replace_value
self.add_state("confmat", torch.zeros(num_classes, num_classes), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
confmat = _cramers_v_update(preds, target, self.num_classes, self.nan_strategy, self.nan_replace_value)
self.confmat += confmat
def compute(self) -> Tensor:
"""Compute Cramer's V statistic."""
return _cramers_v_compute(self.confmat, self.bias_correction)
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.nominal import CramersV
>>> metric = CramersV(num_classes=5)
>>> metric.update(torch.randint(0, 4, (100,)), torch.randint(0, 4, (100,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.nominal import CramersV
>>> metric = CramersV(num_classes=5)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (100,)), torch.randint(0, 4, (100,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
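# A minimal usage sketch (not part of the upstream module), assuming only the class
# defined above and torch, checking the symmetry stated in the docstring: swapping
# ``preds`` and ``target`` leaves Cramer's V unchanged.
if __name__ == "__main__":
    import torch

    preds = torch.randint(0, 5, (200,))
    target = (preds + torch.randint(0, 2, (200,))).clamp(max=4)  # correlated targets
    metric = CramersV(num_classes=5)
    v_ab = metric(preds, target)
    metric.reset()
    v_ba = metric(target, preds)
    print(v_ab, v_ba, bool(torch.isclose(v_ab, v_ba)))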
# File: public_repos/torchmetrics/src/torchmetrics/nominal/theils_u.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.nominal.theils_u import _theils_u_compute, _theils_u_update
from torchmetrics.functional.nominal.utils import _nominal_input_validation
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["TheilsU.plot"]
class TheilsU(Metric):
r"""Compute `Theil's U`_ statistic measuring the association between two categorical (nominal) data series.
.. math::
U(X|Y) = \frac{H(X) - H(X|Y)}{H(X)}
where :math:`H(X)` is the entropy of variable :math:`X` and :math:`H(X|Y)` is the conditional entropy of :math:`X`
given :math:`Y`. It is also known as the Uncertainty Coefficient. Theil's U is an asymmetric coefficient, i.e.
:math:`TheilsU(preds, target) \neq TheilsU(target, preds)`, so the order of the inputs matters. The output values
lie in [0, 1], where 0 means that y provides no information about x and 1 means that y provides complete
information about x.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Either 1D or 2D tensor of categorical (nominal) data from the first data
series (called X in the above definition) with shape ``(batch_size,)`` or ``(batch_size, num_classes)``,
respectively.
- ``target`` (:class:`~torch.Tensor`): Either 1D or 2D tensor of categorical (nominal) data from the second data
series (called Y in the above definition) with shape ``(batch_size,)`` or ``(batch_size, num_classes)``,
respectively.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``theils_u`` (:class:`~torch.Tensor`): Scalar tensor containing the Theil's U statistic.
Args:
num_classes: Integer specifying the number of classes
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> from torchmetrics.nominal import TheilsU
>>> _ = torch.manual_seed(42)
>>> preds = torch.randint(10, (10,))
>>> target = torch.randint(10, (10,))
>>> metric = TheilsU(num_classes=10)
>>> metric(preds, target)
tensor(0.8530)
"""
full_state_update: bool = False
is_differentiable: bool = False
higher_is_better: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
confmat: Tensor
def __init__(
self,
num_classes: int,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.num_classes = num_classes
_nominal_input_validation(nan_strategy, nan_replace_value)
self.nan_strategy = nan_strategy
self.nan_replace_value = nan_replace_value
self.add_state("confmat", torch.zeros(num_classes, num_classes), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
confmat = _theils_u_update(preds, target, self.num_classes, self.nan_strategy, self.nan_replace_value)
self.confmat += confmat
def compute(self) -> Tensor:
"""Compute Theil's U statistic."""
return _theils_u_compute(self.confmat)
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.nominal import TheilsU
>>> metric = TheilsU(num_classes=10)
>>> metric.update(torch.randint(10, (10,)), torch.randint(10, (10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.nominal import TheilsU
>>> metric = TheilsU(num_classes=10)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(10, (10,)), torch.randint(10, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
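# A minimal usage sketch (not part of the upstream module), assuming only the class
# defined above and torch, illustrating the asymmetry stated in the docstring:
# here ``y`` is fully determined by ``x`` but not the other way around.
if __name__ == "__main__":
    import torch

    x = torch.randint(0, 10, (500,))
    y = x % 3  # knowing x fixes y, knowing y does not fix x
    metric = TheilsU(num_classes=10)
    u_x_given_y = metric(x, y)
    metric.reset()
    u_y_given_x = metric(y, x)
    print(u_x_given_y, u_y_given_x)  # U(y|x) is ~1 here, U(x|y) is well below 1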
# File: public_repos/torchmetrics/src/torchmetrics/nominal/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.nominal.cramers import CramersV
from torchmetrics.nominal.fleiss_kappa import FleissKappa
from torchmetrics.nominal.pearson import PearsonsContingencyCoefficient
from torchmetrics.nominal.theils_u import TheilsU
from torchmetrics.nominal.tschuprows import TschuprowsT
__all__ = [
"CramersV",
"FleissKappa",
"PearsonsContingencyCoefficient",
"TheilsU",
"TschuprowsT",
]
# File: public_repos/torchmetrics/src/torchmetrics/classification/precision_recall.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
from torchmetrics.functional.classification.precision_recall import _precision_recall_reduce
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinaryPrecision.plot",
"MulticlassPrecision.plot",
"MultilabelPrecision.plot",
"BinaryRecall.plot",
"MulticlassRecall.plot",
"MultilabelRecall.plot",
]
class BinaryPrecision(BinaryStatScores):
r"""Compute `Precision`_ for binary tasks.
.. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and false positives
respectively. The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0`. If this case is
encountered, a score of 0 is returned.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
tensor with values outside the [0,1] range, we consider the input to be logits and automatically apply a sigmoid
per element. Additionally, we convert to an int tensor by thresholding with the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``bp`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, the metric returns a scalar
value. If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a
scalar value per sample.
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryPrecision
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> metric = BinaryPrecision()
>>> metric(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryPrecision
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> metric = BinaryPrecision()
>>> metric(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.classification import BinaryPrecision
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = BinaryPrecision(multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.4000, 0.0000])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _precision_recall_reduce(
"precision", tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryPrecision
>>> metric = BinaryPrecision()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryPrecision
>>> metric = BinaryPrecision()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
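# A minimal usage sketch (not part of the upstream file), assuming only the class
# defined above and torch, showing the input handling documented in the docstring:
# float predictions outside [0, 1] are treated as logits, passed through a sigmoid
# and then thresholded.
if __name__ == "__main__":
    import torch

    target = torch.tensor([0, 1, 0, 1])
    logits = torch.tensor([-2.0, 3.0, 0.5, 1.5])  # values outside [0, 1] -> treated as logits
    print(BinaryPrecision(threshold=0.5)(logits, target))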
class MulticlassPrecision(MulticlassStatScores):
r"""Compute `Precision`_ for multiclass tasks.
.. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and false positives
respectively. The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0`. If this case is
encountered for any class, the metric for that class will be set to 0 and the overall metric may therefore be
affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
If preds is a floating point tensor, we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``mcp`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassPrecision
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassPrecision(num_classes=3)
>>> metric(preds, target)
tensor(0.8333)
>>> mcp = MulticlassPrecision(num_classes=3, average=None)
>>> mcp(preds, target)
tensor([1.0000, 0.5000, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MulticlassPrecision
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassPrecision(num_classes=3)
>>> metric(preds, target)
tensor(0.8333)
>>> mcp = MulticlassPrecision(num_classes=3, average=None)
>>> mcp(preds, target)
tensor([1.0000, 0.5000, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.classification import MulticlassPrecision
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassPrecision(num_classes=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.3889, 0.2778])
>>> mcp = MulticlassPrecision(num_classes=3, multidim_average='samplewise', average=None)
>>> mcp(preds, target)
tensor([[0.6667, 0.0000, 0.5000],
[0.0000, 0.5000, 0.3333]])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _precision_recall_reduce(
"precision", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a single value per class
>>> from torchmetrics.classification import MulticlassPrecision
>>> metric = MulticlassPrecision(num_classes=3, average=None)
>>> metric.update(randint(3, (20,)), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a multiple values per class
>>> from torchmetrics.classification import MulticlassPrecision
>>> metric = MulticlassPrecision(num_classes=3, average=None)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,)), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
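# A minimal usage sketch (not part of the upstream file), assuming only the class
# defined above and torch, showing the output-shape contract documented in the
# docstring: with ``multidim_average='samplewise'`` and ``average=None`` the result
# holds one precision per sample and per class.
if __name__ == "__main__":
    import torch

    metric = MulticlassPrecision(num_classes=3, average=None, multidim_average="samplewise")
    out = metric(torch.randint(3, (4, 10)), torch.randint(3, (4, 10)))
    print(out.shape)  # torch.Size([4, 3])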
class MultilabelPrecision(MultilabelStatScores):
r"""Compute `Precision`_ for multilabel tasks.
.. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and false positives
respectively. The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0`. If this case is
encountered for any label, the metric for that label will be set to 0 and the overall metric may therefore be
affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ...)``.
If preds is a floating point tensor with values outside the [0,1] range, we consider the input to be logits and
automatically apply a sigmoid per element. Additionally, we convert to an int tensor by thresholding with the value
in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``mlp`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelPrecision
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelPrecision(num_labels=3)
>>> metric(preds, target)
tensor(0.5000)
>>> mlp = MultilabelPrecision(num_labels=3, average=None)
>>> mlp(preds, target)
tensor([1.0000, 0.0000, 0.5000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelPrecision
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelPrecision(num_labels=3)
>>> metric(preds, target)
tensor(0.5000)
>>> mlp = MultilabelPrecision(num_labels=3, average=None)
>>> mlp(preds, target)
tensor([1.0000, 0.0000, 0.5000])
Example (multidim tensors):
>>> from torchmetrics.classification import MultilabelPrecision
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = MultilabelPrecision(num_labels=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.3333, 0.0000])
>>> mlp = MultilabelPrecision(num_labels=3, multidim_average='samplewise', average=None)
>>> mlp(preds, target)
tensor([[0.5000, 0.5000, 0.0000],
[0.0000, 0.0000, 0.0000]])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _precision_recall_reduce(
"precision", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, multilabel=True
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelPrecision
>>> metric = MultilabelPrecision(num_labels=3)
>>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelPrecision
>>> metric = MultilabelPrecision(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class BinaryRecall(BinaryStatScores):
r"""Compute `Recall`_ for binary tasks.
.. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and false negatives
respectively. The metric is only properly defined when :math:`\text{TP} + \text{FN} \neq 0`. If this case is
encountered, a score of 0 is returned.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, ...)``. If preds is a
floating point tensor with values outside the [0,1] range, we consider the input to be logits and automatically
apply a sigmoid per element. Additionally, we convert to an int tensor by thresholding with the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``br`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, the metric returns a scalar
value. If ``multidim_average`` is set to ``samplewise``, the metric returns an ``(N,)`` vector consisting of
a scalar value per sample.
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryRecall
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> metric = BinaryRecall()
>>> metric(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryRecall
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> metric = BinaryRecall()
>>> metric(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.classification import BinaryRecall
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = BinaryRecall(multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.6667, 0.0000])
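Example (supplementary usage sketch): the settings below are illustrative assumptions, and outputs are omitted
because the inputs are randomly generated:
>>> from torch import randn, randint
>>> from torchmetrics.classification import BinaryRecall
>>> metric = BinaryRecall(threshold=0.5, ignore_index=-1) # target entries equal to -1 would be ignored
>>> preds = randn(16) # values outside [0, 1] are treated as logits and passed through a sigmoid
>>> target = randint(2, (16,))
>>> score = metric(preds, target) # scalar tensor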
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _precision_recall_reduce(
"recall", tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryRecall
>>> metric = BinaryRecall()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryRecall
>>> metric = BinaryRecall()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MulticlassRecall(MulticlassStatScores):
r"""Compute `Recall`_ for multiclass tasks.
.. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and false negatives
respectively. The metric is only properly defined when :math:`\text{TP} + \text{FN} \neq 0`. If this case is
encountered for any class, the metric for that class will be set to 0 and the overall metric may therefore be
affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``
If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcr`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassRecall
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassRecall(num_classes=3)
>>> metric(preds, target)
tensor(0.8333)
>>> mcr = MulticlassRecall(num_classes=3, average=None)
>>> mcr(preds, target)
tensor([0.5000, 1.0000, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MulticlassRecall
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassRecall(num_classes=3)
>>> metric(preds, target)
tensor(0.8333)
>>> mcr = MulticlassRecall(num_classes=3, average=None)
>>> mcr(preds, target)
tensor([0.5000, 1.0000, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.classification import MulticlassRecall
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassRecall(num_classes=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.5000, 0.2778])
>>> mcr = MulticlassRecall(num_classes=3, multidim_average='samplewise', average=None)
>>> mcr(preds, target)
tensor([[1.0000, 0.0000, 0.5000],
[0.0000, 0.3333, 0.5000]])
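Example (supplementary usage sketch, using ``top_k``): the settings below are illustrative assumptions, and
outputs are omitted because the inputs are randomly generated:
>>> from torch import randn, randint
>>> from torchmetrics.classification import MulticlassRecall
>>> metric = MulticlassRecall(num_classes=5, top_k=2, average='macro')
>>> preds = randn(10, 5) # unnormalized logits over 5 classes
>>> target = randint(5, (10,))
>>> score = metric(preds, target) # scalar tensor; a prediction counts if the target is among the top 2 scores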
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _precision_recall_reduce(
"recall", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a single value per class
>>> from torchmetrics.classification import MulticlassRecall
>>> metric = MulticlassRecall(num_classes=3, average=None)
>>> metric.update(randint(3, (20,)), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a multiple values per class
>>> from torchmetrics.classification import MulticlassRecall
>>> metric = MulticlassRecall(num_classes=3, average=None)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,)), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelRecall(MultilabelStatScores):
r"""Compute `Recall`_ for multilabel tasks.
.. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and false negatives
respectively. The metric is only properly defined when :math:`\text{TP} + \text{FN} \neq 0`. If this case is
encountered for any label, the metric for that label will be set to 0 and the overall metric may therefore be
affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid
per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlr`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelRecall
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelRecall(num_labels=3)
>>> metric(preds, target)
tensor(0.6667)
>>> mlr = MultilabelRecall(num_labels=3, average=None)
>>> mlr(preds, target)
tensor([1., 0., 1.])
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelRecall
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelRecall(num_labels=3)
>>> metric(preds, target)
tensor(0.6667)
>>> mlr = MultilabelRecall(num_labels=3, average=None)
>>> mlr(preds, target)
tensor([1., 0., 1.])
Example (multidim tensors):
>>> from torchmetrics.classification import MultilabelRecall
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = MultilabelRecall(num_labels=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.6667, 0.0000])
>>> mlr = MultilabelRecall(num_labels=3, multidim_average='samplewise', average=None)
>>> mlr(preds, target)
tensor([[1., 1., 0.],
[0., 0., 0.]])
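Example (supplementary usage sketch, using ``average='weighted'``): the settings below are illustrative
assumptions, and outputs are omitted because the inputs are randomly generated:
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelRecall
>>> metric = MultilabelRecall(num_labels=4, average='weighted')
>>> preds = rand(8, 4)
>>> target = randint(2, (8, 4))
>>> score = metric(preds, target) # scalar tensor, per-label scores weighted by label support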
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _precision_recall_reduce(
"recall", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, multilabel=True
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelRecall
>>> metric = MultilabelRecall(num_labels=3)
>>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelRecall
>>> metric = MultilabelRecall(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class Precision(_ClassificationTaskWrapper):
r"""Compute `Precision`_.
.. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and false positives
respectively. The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0`. If this case is
encountered for any class/label, the metric for that class/label will be set to 0 and the overall metric may
therefore be affected in turn.
This class is a simple wrapper to get the task-specific version of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryPrecision`, :class:`~torchmetrics.classification.MulticlassPrecision` and
:class:`~torchmetrics.classification.MultilabelPrecision` for the specific details of how each argument influences
the metric, together with examples.
Legacy Example:
>>> from torch import tensor
>>> preds = tensor([2, 0, 2, 1])
>>> target = tensor([1, 1, 2, 0])
>>> precision = Precision(task="multiclass", average='macro', num_classes=3)
>>> precision(preds, target)
tensor(0.1667)
>>> precision = Precision(task="multiclass", average='micro', num_classes=3)
>>> precision(preds, target)
tensor(0.2500)
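Example (supplementary usage sketch of task dispatch): the settings below are illustrative assumptions, and
outputs are omitted because the inputs are randomly generated:
>>> from torch import rand, randint
>>> from torchmetrics.classification import Precision
>>> binary_precision = Precision(task="binary") # returns a BinaryPrecision instance
>>> score = binary_precision(rand(10), randint(2, (10,)))
>>> multilabel_precision = Precision(task="multilabel", num_labels=3) # returns a MultilabelPrecision instance
>>> score = multilabel_precision(rand(10, 3), randint(2, (10, 3)))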
"""
def __new__(
cls: Type["Precision"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
assert multidim_average is not None # noqa: S101 # needed for mypy
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return BinaryPrecision(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return MulticlassPrecision(num_classes, top_k, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelPrecision(num_labels, threshold, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
class Recall(_ClassificationTaskWrapper):
r"""Compute `Recall`_.
.. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
false negatives respectively. The metric is only properly defined when :math:`\text{TP} + \text{FN} \neq 0`. If this
case is encountered for any class/label, the metric for that class/label will be set to 0 and the overall metric may
therefore be affected in turn.
This class is a simple wrapper to get the task-specific version of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryRecall`,
:class:`~torchmetrics.classification.MulticlassRecall` and :class:`~torchmetrics.classification.MultilabelRecall`
for the specific details of how each argument influences the metric, together with examples.
Legacy Example:
>>> from torch import tensor
>>> preds = tensor([2, 0, 2, 1])
>>> target = tensor([1, 1, 2, 0])
>>> recall = Recall(task="multiclass", average='macro', num_classes=3)
>>> recall(preds, target)
tensor(0.3333)
>>> recall = Recall(task="multiclass", average='micro', num_classes=3)
>>> recall(preds, target)
tensor(0.2500)
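Example (supplementary usage sketch of task dispatch): the settings below are illustrative assumptions, and
outputs are omitted because the inputs are randomly generated:
>>> from torch import rand, randint
>>> from torchmetrics.classification import Recall
>>> binary_recall = Recall(task="binary") # returns a BinaryRecall instance
>>> score = binary_recall(rand(10), randint(2, (10,)))
>>> multilabel_recall = Recall(task="multilabel", num_labels=3) # returns a MultilabelRecall instance
>>> score = multilabel_recall(rand(10, 3), randint(2, (10, 3)))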
"""
def __new__(
cls: Type["Recall"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
if task == ClassificationTask.BINARY:
return BinaryRecall(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return MulticlassRecall(num_classes, top_k, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelRecall(num_labels, threshold, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/classification/f_beta.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
from torchmetrics.functional.classification.f_beta import (
_binary_fbeta_score_arg_validation,
_fbeta_reduce,
_multiclass_fbeta_score_arg_validation,
_multilabel_fbeta_score_arg_validation,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinaryFBetaScore.plot",
"MulticlassFBetaScore.plot",
"MultilabelFBetaScore.plot",
"BinaryF1Score.plot",
"MulticlassF1Score.plot",
"MultilabelF1Score.plot",
]
class BinaryFBetaScore(BinaryStatScores):
r"""Compute `F-score`_ metric for binary tasks.
.. math::
F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
positives and false negatives respectively. If this case is encountered, a score of 0 is returned.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, ...)``. If preds is a floating
point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid
per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bfbs`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` argument:
- If ``multidim_average`` is set to ``global`` the output will be a scalar tensor
- If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)`` consisting of
a scalar value per sample.
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryFBetaScore
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> metric = BinaryFBetaScore(beta=2.0)
>>> metric(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryFBetaScore
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> metric = BinaryFBetaScore(beta=2.0)
>>> metric(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.classification import BinaryFBetaScore
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = BinaryFBetaScore(beta=2.0, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.5882, 0.0000])
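Example (supplementary usage sketch with a precision-weighted ``beta``): the settings below are illustrative
assumptions, and outputs are omitted because the inputs are randomly generated:
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryFBetaScore
>>> metric = BinaryFBetaScore(beta=0.5) # beta < 1 weights precision higher than recall
>>> preds = rand(20)
>>> target = randint(2, (20,))
>>> score = metric(preds, target) # scalar tensor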
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
beta: float,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
threshold=threshold,
multidim_average=multidim_average,
ignore_index=ignore_index,
validate_args=False,
**kwargs,
)
if validate_args:
_binary_fbeta_score_arg_validation(beta, threshold, multidim_average, ignore_index)
self.validate_args = validate_args
self.beta = beta
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _fbeta_reduce(tp, fp, tn, fn, self.beta, average="binary", multidim_average=self.multidim_average)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryFBetaScore
>>> metric = BinaryFBetaScore(beta=2.0)
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryFBetaScore
>>> metric = BinaryFBetaScore(beta=2.0)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MulticlassFBetaScore(MulticlassStatScores):
r"""Compute `F-score`_ metric for multiclass tasks.
.. math::
F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
positives and false negatives respectively. If this case is encountered for any class, the metric for that class
will be set to 0 and the overall metric may therefore be affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcfbs`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassFBetaScore
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3)
>>> metric(preds, target)
tensor(0.7963)
>>> mcfbs = MulticlassFBetaScore(beta=2.0, num_classes=3, average=None)
>>> mcfbs(preds, target)
tensor([0.5556, 0.8333, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MulticlassFBetaScore
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3)
>>> metric(preds, target)
tensor(0.7963)
>>> mcfbs = MulticlassFBetaScore(beta=2.0, num_classes=3, average=None)
>>> mcfbs(preds, target)
tensor([0.5556, 0.8333, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.classification import MulticlassFBetaScore
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.4697, 0.2706])
>>> mcfbs = MulticlassFBetaScore(beta=2.0, num_classes=3, multidim_average='samplewise', average=None)
>>> mcfbs(preds, target)
tensor([[0.9091, 0.0000, 0.5000],
[0.0000, 0.3571, 0.4545]])
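Example (supplementary usage sketch, using ``average='weighted'``): the settings below are illustrative
assumptions, and outputs are omitted because the inputs are randomly generated:
>>> from torch import randn, randint
>>> from torchmetrics.classification import MulticlassFBetaScore
>>> metric = MulticlassFBetaScore(beta=0.5, num_classes=4, average='weighted')
>>> preds = randn(12, 4) # logits; argmax is taken over the class dimension
>>> target = randint(4, (12,))
>>> score = metric(preds, target) # scalar tensor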
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
beta: float,
num_classes: int,
top_k: int = 1,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_classes=num_classes,
top_k=top_k,
average=average,
multidim_average=multidim_average,
ignore_index=ignore_index,
validate_args=False,
**kwargs,
)
if validate_args:
_multiclass_fbeta_score_arg_validation(beta, num_classes, top_k, average, multidim_average, ignore_index)
self.validate_args = validate_args
self.beta = beta
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _fbeta_reduce(tp, fp, tn, fn, self.beta, average=self.average, multidim_average=self.multidim_average)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a single value per class
>>> from torchmetrics.classification import MulticlassFBetaScore
>>> metric = MulticlassFBetaScore(num_classes=3, beta=2.0, average=None)
>>> metric.update(randint(3, (20,)), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a multiple values per class
>>> from torchmetrics.classification import MulticlassFBetaScore
>>> metric = MulticlassFBetaScore(num_classes=3, beta=2.0, average=None)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,)), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelFBetaScore(MultilabelStatScores):
r"""Compute `F-score`_ metric for multilabel tasks.
.. math::
F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
positives and false negatives respectively. If this case is encountered for any label, the metric for that label
will be set to 0 and the overall metric may therefore be affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid
per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlfbs`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelFBetaScore
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelFBetaScore(beta=2.0, num_labels=3)
>>> metric(preds, target)
tensor(0.6111)
>>> mlfbs = MultilabelFBetaScore(beta=2.0, num_labels=3, average=None)
>>> mlfbs(preds, target)
tensor([1.0000, 0.0000, 0.8333])
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelFBetaScore
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelFBetaScore(beta=2.0, num_labels=3)
>>> metric(preds, target)
tensor(0.6111)
>>> mlfbs = MultilabelFBetaScore(beta=2.0, num_labels=3, average=None)
>>> mlfbs(preds, target)
tensor([1.0000, 0.0000, 0.8333])
Example (multidim tensors):
>>> from torchmetrics.classification import MultilabelFBetaScore
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = MultilabelFBetaScore(num_labels=3, beta=2.0, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.5556, 0.0000])
>>> mlfbs = MultilabelFBetaScore(num_labels=3, beta=2.0, multidim_average='samplewise', average=None)
>>> mlfbs(preds, target)
tensor([[0.8333, 0.8333, 0.0000],
[0.0000, 0.0000, 0.0000]])
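Example (supplementary usage sketch with a custom ``threshold``): the settings below are illustrative
assumptions, and outputs are omitted because the inputs are randomly generated:
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelFBetaScore
>>> metric = MultilabelFBetaScore(beta=2.0, num_labels=4, threshold=0.7)
>>> preds = rand(8, 4) # probabilities are binarized at 0.7 instead of the default 0.5
>>> target = randint(2, (8, 4))
>>> score = metric(preds, target) # scalar tensor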
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
beta: float,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_labels=num_labels,
threshold=threshold,
average=average,
multidim_average=multidim_average,
ignore_index=ignore_index,
validate_args=False,
**kwargs,
)
if validate_args:
_multilabel_fbeta_score_arg_validation(beta, num_labels, threshold, average, multidim_average, ignore_index)
self.validate_args = validate_args
self.beta = beta
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _fbeta_reduce(
tp, fp, tn, fn, self.beta, average=self.average, multidim_average=self.multidim_average, multilabel=True
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelFBetaScore
>>> metric = MultilabelFBetaScore(num_labels=3, beta=2.0)
>>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelFBetaScore
>>> metric = MultilabelFBetaScore(num_labels=3, beta=2.0)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class BinaryF1Score(BinaryFBetaScore):
r"""Compute F-1 score for binary tasks.
.. math::
F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}
The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
positives and false negatives respectively. If this case is encountered, a score of 0 is returned.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bf1s`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` argument:
- If ``multidim_average`` is set to ``global``, the metric returns a scalar value.
- If ``multidim_average`` is set to ``samplewise``, the metric returns an ``(N,)`` vector consisting of a scalar
value per sample.
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryF1Score
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> metric = BinaryF1Score()
>>> metric(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryF1Score
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> metric = BinaryF1Score()
>>> metric(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.classification import BinaryF1Score
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = BinaryF1Score(multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.5000, 0.0000])
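Example (supplementary usage sketch with ``multidim_average='samplewise'``): the settings below are illustrative
assumptions, and outputs are omitted because the inputs are randomly generated:
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryF1Score
>>> metric = BinaryF1Score(multidim_average='samplewise')
>>> preds = rand(4, 10) # at least one extra dimension is required for the samplewise reduction
>>> target = randint(2, (4, 10))
>>> scores = metric(preds, target) # tensor of shape (4,), one value per sample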
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
beta=1.0,
threshold=threshold,
multidim_average=multidim_average,
ignore_index=ignore_index,
validate_args=validate_args,
**kwargs,
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryF1Score
>>> metric = BinaryF1Score()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryF1Score
>>> metric = BinaryF1Score()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MulticlassF1Score(MulticlassFBetaScore):
r"""Compute F-1 score for multiclass tasks.
.. math::
F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}
The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
positives and false negatives respectively. If this case is encountered for any class, the metric for that class
will be set to 0 and the overall metric may therefore be affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcf1s`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassF1Score
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassF1Score(num_classes=3)
>>> metric(preds, target)
tensor(0.7778)
>>> mcf1s = MulticlassF1Score(num_classes=3, average=None)
>>> mcf1s(preds, target)
tensor([0.6667, 0.6667, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MulticlassF1Score
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassF1Score(num_classes=3)
>>> metric(preds, target)
tensor(0.7778)
>>> mcf1s = MulticlassF1Score(num_classes=3, average=None)
>>> mcf1s(preds, target)
tensor([0.6667, 0.6667, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.classification import MulticlassF1Score
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassF1Score(num_classes=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.4333, 0.2667])
>>> mcf1s = MulticlassF1Score(num_classes=3, multidim_average='samplewise', average=None)
>>> mcf1s(preds, target)
tensor([[0.8000, 0.0000, 0.5000],
[0.0000, 0.4000, 0.4000]])
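Example (supplementary usage sketch with ``ignore_index``): the settings below are illustrative assumptions, and
outputs are omitted because the inputs are randomly generated:
>>> from torch import randint
>>> from torchmetrics.classification import MulticlassF1Score
>>> metric = MulticlassF1Score(num_classes=3, ignore_index=-1) # targets equal to -1 would not contribute
>>> preds = randint(3, (20,))
>>> target = randint(3, (20,))
>>> score = metric(preds, target) # scalar macro-averaged tensor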
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
top_k: int = 1,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
beta=1.0,
num_classes=num_classes,
top_k=top_k,
average=average,
multidim_average=multidim_average,
ignore_index=ignore_index,
validate_args=validate_args,
**kwargs,
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a single value per class
>>> from torchmetrics.classification import MulticlassF1Score
>>> metric = MulticlassF1Score(num_classes=3, average=None)
>>> metric.update(randint(3, (20,)), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a multiple values per class
>>> from torchmetrics.classification import MulticlassF1Score
>>> metric = MulticlassF1Score(num_classes=3, average=None)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,)), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelF1Score(MultilabelFBetaScore):
r"""Compute F-1 score for multilabel tasks.
.. math::
F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}
The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
positives and false negatives respectively. If this case is encountered for any label, the metric for that label
will be set to 0 and the overall metric may therefore be affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``.
If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and
will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value
in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlf1s`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelF1Score
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelF1Score(num_labels=3)
>>> metric(preds, target)
tensor(0.5556)
>>> mlf1s = MultilabelF1Score(num_labels=3, average=None)
>>> mlf1s(preds, target)
tensor([1.0000, 0.0000, 0.6667])
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelF1Score
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelF1Score(num_labels=3)
>>> metric(preds, target)
tensor(0.5556)
>>> mlf1s = MultilabelF1Score(num_labels=3, average=None)
>>> mlf1s(preds, target)
tensor([1.0000, 0.0000, 0.6667])
Example (multidim tensors):
>>> from torchmetrics.classification import MultilabelF1Score
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = MultilabelF1Score(num_labels=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.4444, 0.0000])
>>> mlf1s = MultilabelF1Score(num_labels=3, multidim_average='samplewise', average=None)
>>> mlf1s(preds, target)
tensor([[0.6667, 0.6667, 0.0000],
[0.0000, 0.0000, 0.0000]])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
beta=1.0,
num_labels=num_labels,
threshold=threshold,
average=average,
multidim_average=multidim_average,
ignore_index=ignore_index,
validate_args=validate_args,
**kwargs,
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelF1Score
>>> metric = MultilabelF1Score(num_labels=3)
>>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelF1Score
>>> metric = MultilabelF1Score(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
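# Illustrative sketch (assumption, not part of the library API): the docstring above states that float
# predictions outside [0, 1] are treated as logits and passed through a sigmoid before thresholding, so
# probabilities and their corresponding logits should give the same score. The helper name below is
# hypothetical and only demonstrates that behaviour.
def _example_multilabel_f1_logits_vs_probs() -> None:
    """Sketch: logits and the matching probabilities yield the same ``MultilabelF1Score``."""
    import torch

    target = torch.tensor([[0, 1, 1], [1, 0, 1]])
    probs = torch.tensor([[0.1, 0.9, 0.6], [0.7, 0.2, 0.8]])
    logits = torch.logit(probs)  # contains values outside [0, 1], which triggers the auto-sigmoid
    metric = MultilabelF1Score(num_labels=3)
    assert torch.allclose(metric(probs, target), metric(logits, target))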
class FBetaScore(_ClassificationTaskWrapper):
r"""Compute `F-score`_ metric.
.. math::
F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
positives and false negatives respectively. If this case is encountered for any class/label, the metric for that
class/label will be set to 0 and the overall metric may therefore be affected in turn.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryFBetaScore`,
:class:`~torchmetrics.classification.MulticlassFBetaScore` and
:class:`~torchmetrics.classification.MultilabelFBetaScore` for details on how each argument influences the metric,
along with usage examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([0, 1, 2, 0, 1, 2])
>>> preds = tensor([0, 2, 1, 0, 0, 1])
>>> f_beta = FBetaScore(task="multiclass", num_classes=3, beta=0.5)
>>> f_beta(preds, target)
tensor(0.3333)
"""
def __new__(
cls: Type["FBetaScore"],
task: Literal["binary", "multiclass", "multilabel"],
beta: float = 1.0,
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
if task == ClassificationTask.BINARY:
return BinaryFBetaScore(beta, threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return MulticlassFBetaScore(beta, num_classes, top_k, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelFBetaScore(beta, num_labels, threshold, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
class F1Score(_ClassificationTaskWrapper):
r"""Compute F-1 score.
.. math::
F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}
The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
positives and false negatives respectively. If this case is encountered for any class/label, the metric for that
class/label will be set to 0 and the overall metric may therefore be affected in turn.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryF1Score`, :class:`~torchmetrics.classification.MulticlassF1Score` and
:class:`~torchmetrics.classification.MultilabelF1Score` for details on how each argument influences the metric,
along with usage examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([0, 1, 2, 0, 1, 2])
>>> preds = tensor([0, 2, 1, 0, 0, 1])
>>> f1 = F1Score(task="multiclass", num_classes=3)
>>> f1(preds, target)
tensor(0.3333)
"""
def __new__(
cls: Type["F1Score"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
if task == ClassificationTask.BINARY:
return BinaryF1Score(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return MulticlassF1Score(num_classes, top_k, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelF1Score(num_labels, threshold, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
public_repos/torchmetrics/src/torchmetrics/classification/average_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.precision_recall_curve import (
BinaryPrecisionRecallCurve,
MulticlassPrecisionRecallCurve,
MultilabelPrecisionRecallCurve,
)
from torchmetrics.functional.classification.average_precision import (
_binary_average_precision_compute,
_multiclass_average_precision_arg_validation,
_multiclass_average_precision_compute,
_multilabel_average_precision_arg_validation,
_multilabel_average_precision_compute,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinaryAveragePrecision.plot",
"MulticlassAveragePrecision.plot",
"MultilabelAveragePrecision.plot",
]
class BinaryAveragePrecision(BinaryPrecisionRecallCurve):
r"""Compute the average precision (AP) score for binary tasks.
The AP score summarizes a precision-recall curve as a weighted mean of precisions at each threshold, with the
difference in recall from the previous threshold as weight:
.. math::
AP = \sum_{n} (R_n - R_{n-1}) P_n
where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
equivalent to the area under the precision-recall curve (AUPRC).
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for
each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
therefore only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the
positive class.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bap`` (:class:`~torch.Tensor`): A single scalar with the average precision score
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryAveragePrecision
>>> preds = tensor([0, 0.5, 0.7, 0.8])
>>> target = tensor([0, 1, 1, 0])
>>> metric = BinaryAveragePrecision(thresholds=None)
>>> metric(preds, target)
tensor(0.5833)
>>> bap = BinaryAveragePrecision(thresholds=5)
>>> bap(preds, target)
tensor(0.6667)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def compute(self) -> Tensor: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _binary_average_precision_compute(state, self.thresholds)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single
>>> import torch
>>> from torchmetrics.classification import BinaryAveragePrecision
>>> metric = BinaryAveragePrecision()
>>> metric.update(torch.rand(20,), torch.randint(2, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.classification import BinaryAveragePrecision
>>> metric = BinaryAveragePrecision()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(20,), torch.randint(2, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
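# Illustrative sketch (assumption, not part of the library API): the binned mode described above trades
# accuracy for constant memory. On the docstring data a coarse 5-bin grid gives a visibly different score,
# while a dense grid is expected to be close to the exact value. The helper name below is hypothetical.
def _example_binary_ap_binned_vs_exact() -> None:
    """Sketch: exact vs. binned ``BinaryAveragePrecision`` on the docstring example."""
    import torch

    preds = torch.tensor([0.0, 0.5, 0.7, 0.8])
    target = torch.tensor([0, 1, 1, 0])
    exact = BinaryAveragePrecision(thresholds=None)(preds, target)   # 0.5833, O(n_samples) memory
    coarse = BinaryAveragePrecision(thresholds=5)(preds, target)     # 0.6667, constant memory
    dense = BinaryAveragePrecision(thresholds=1000)(preds, target)   # constant memory, near-exact
    assert torch.isclose(exact, dense, atol=1e-2)
    assert not torch.isclose(exact, coarse, atol=1e-2)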
class MulticlassAveragePrecision(MulticlassPrecisionRecallCurve):
r"""Compute the average precision (AP) score for multiclass tasks.
The AP score summarizes a precision-recall curve as a weighted mean of precisions at each threshold, with the
difference in recall from the previous threshold as weight:
.. math::
AP = \sum_{n} (R_n - R_{n-1}) P_n
where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
equivalent to the area under the precision-recall curve (AUPRC).
For multiclass the metric is calculated by iteratively treating each class as the positive class and all other
classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by
this metric. By default the reported metric is then the average over all classes, but this behavior can be changed
by setting the ``average`` argument.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
apply softmax per sample.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
therefore only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcap`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be
returned with AP score per class. If `average="macro"|"weighted"` then a single scalar is returned.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over classes. Should be one of the following:
- ``macro``: Calculate score for each class and average them
- ``weighted``: calculates score for each class and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each class and applies no reduction
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassAveragePrecision
>>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = tensor([0, 1, 3, 2])
>>> metric = MulticlassAveragePrecision(num_classes=5, average="macro", thresholds=None)
>>> metric(preds, target)
tensor(0.6250)
>>> mcap = MulticlassAveragePrecision(num_classes=5, average=None, thresholds=None)
>>> mcap(preds, target)
tensor([1.0000, 1.0000, 0.2500, 0.2500, nan])
>>> mcap = MulticlassAveragePrecision(num_classes=5, average="macro", thresholds=5)
>>> mcap(preds, target)
tensor(0.5000)
>>> mcap = MulticlassAveragePrecision(num_classes=5, average=None, thresholds=5)
>>> mcap(preds, target)
tensor([1.0000, 1.0000, 0.2500, 0.2500, -0.0000])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_classes=num_classes, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multiclass_average_precision_arg_validation(num_classes, average, thresholds, ignore_index)
self.average = average # type: ignore[assignment]
self.validate_args = validate_args
def compute(self) -> Tensor: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multiclass_average_precision_compute(
state, self.num_classes, self.average, self.thresholds # type: ignore[arg-type]
)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single
>>> import torch
>>> from torchmetrics.classification import MulticlassAveragePrecision
>>> metric = MulticlassAveragePrecision(num_classes=3)
>>> metric.update(torch.randn(20, 3), torch.randint(3,(20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.classification import MulticlassAveragePrecision
>>> metric = MulticlassAveragePrecision(num_classes=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randn(20, 3), torch.randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
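# Illustrative sketch (assumption, not part of the library API): relation between the ``average`` modes
# documented above. The macro score should equal the mean of the per-class one-vs-rest scores, with classes
# that never occur in the target (and therefore yield NaN) excluded from the average. The helper name below
# is hypothetical.
def _example_multiclass_ap_macro_vs_classwise() -> None:
    """Sketch: ``average='macro'`` is the NaN-excluding mean of ``average=None``."""
    import torch

    preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
                          [0.05, 0.75, 0.05, 0.05, 0.05],
                          [0.05, 0.05, 0.75, 0.05, 0.05],
                          [0.05, 0.05, 0.05, 0.75, 0.05]])
    target = torch.tensor([0, 1, 3, 2])
    per_class = MulticlassAveragePrecision(num_classes=5, average=None)(preds, target)
    macro = MulticlassAveragePrecision(num_classes=5, average="macro")(preds, target)
    assert torch.isclose(macro, per_class[~per_class.isnan()].mean())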
class MultilabelAveragePrecision(MultilabelPrecisionRecallCurve):
r"""Compute the average precision (AP) score for multilabel tasks.
The AP score summarizes a precision-recall curve as a weighted mean of precisions at each threshold, with the
difference in recall from the previous threshold as weight:
.. math::
AP = \sum_{n} (R_n - R_{n-1}) P_n
where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
equivalent to the area under the precision-recall curve (AUPRC).
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` containing ground truth labels, and
therefore only contain {0,1} values (except if `ignore_index` is specified).
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlap`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be
returned with AP score per class. If `average="micro"|"macro"|"weighted"` then a single scalar is returned.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned
version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate
the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
`thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
num_labels: Integer specifying the number of labels
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum score over all labels
- ``macro``: Calculate score for each label and average them
- ``weighted``: calculates score for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each label and applies no reduction
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelAveragePrecision
>>> preds = tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> metric = MultilabelAveragePrecision(num_labels=3, average="macro", thresholds=None)
>>> metric(preds, target)
tensor(0.7500)
>>> mlap = MultilabelAveragePrecision(num_labels=3, average=None, thresholds=None)
>>> mlap(preds, target)
tensor([0.7500, 0.5833, 0.9167])
>>> mlap = MultilabelAveragePrecision(num_labels=3, average="macro", thresholds=5)
>>> mlap(preds, target)
tensor(0.7778)
>>> mlap = MultilabelAveragePrecision(num_labels=3, average=None, thresholds=5)
>>> mlap(preds, target)
tensor([0.7500, 0.6667, 0.9167])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_labels=num_labels, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multilabel_average_precision_arg_validation(num_labels, average, thresholds, ignore_index)
self.average = average
self.validate_args = validate_args
def compute(self) -> Tensor: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multilabel_average_precision_compute(
state, self.num_labels, self.average, self.thresholds, self.ignore_index
)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single
>>> import torch
>>> from torchmetrics.classification import MultilabelAveragePrecision
>>> metric = MultilabelAveragePrecision(num_labels=3)
>>> metric.update(torch.rand(20,3), torch.randint(2, (20,3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.classification import MultilabelAveragePrecision
>>> metric = MultilabelAveragePrecision(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(20,3), torch.randint(2, (20,3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
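# Illustrative sketch (assumption, not part of the library API): with ``average=None`` each label is scored
# as an independent binary problem, so the per-label scores should coincide with ``BinaryAveragePrecision``
# applied to the corresponding column. The helper name below is hypothetical.
def _example_multilabel_ap_per_label_is_binary_ap() -> None:
    """Sketch: per-label multilabel AP equals column-wise binary AP."""
    import torch

    preds = torch.tensor([[0.75, 0.05, 0.35],
                          [0.45, 0.75, 0.05],
                          [0.05, 0.55, 0.75],
                          [0.05, 0.65, 0.05]])
    target = torch.tensor([[1, 0, 1], [0, 0, 0], [0, 1, 1], [1, 1, 1]])
    per_label = MultilabelAveragePrecision(num_labels=3, average=None)(preds, target)
    for i in range(3):
        assert torch.isclose(per_label[i], BinaryAveragePrecision()(preds[:, i], target[:, i]))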
class AveragePrecision(_ClassificationTaskWrapper):
r"""Compute the average precision (AP) score.
The AP score summarizes a precision-recall curve as a weighted mean of precisions at each threshold, with the
difference in recall from the previous threshold as weight:
.. math::
AP = \sum_{n} (R_n - R_{n-1}) P_n
where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
equivalent to the area under the precision-recall curve (AUPRC).
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryAveragePrecision`,
:class:`~torchmetrics.classification.MulticlassAveragePrecision` and
:class:`~torchmetrics.classification.MultilabelAveragePrecision` for details on how each argument influences
the metric, along with usage examples.
Legacy Example:
>>> from torch import tensor
>>> pred = tensor([0, 0.1, 0.8, 0.4])
>>> target = tensor([0, 1, 1, 1])
>>> average_precision = AveragePrecision(task="binary")
>>> average_precision(pred, target)
tensor(1.)
>>> pred = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = tensor([0, 1, 3, 2])
>>> average_precision = AveragePrecision(task="multiclass", num_classes=5, average=None)
>>> average_precision(pred, target)
tensor([1.0000, 1.0000, 0.2500, 0.2500, nan])
"""
def __new__( # type: ignore[misc]
cls: Type["AveragePrecision"],
task: Literal["binary", "multiclass", "multilabel"],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
kwargs.update({"thresholds": thresholds, "ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTask.BINARY:
return BinaryAveragePrecision(**kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return MulticlassAveragePrecision(num_classes, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelAveragePrecision(num_labels, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
public_repos/torchmetrics/src/torchmetrics/classification/recall_fixed_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.precision_recall_curve import (
BinaryPrecisionRecallCurve,
MulticlassPrecisionRecallCurve,
MultilabelPrecisionRecallCurve,
)
from torchmetrics.functional.classification.recall_fixed_precision import (
_binary_recall_at_fixed_precision_arg_validation,
_binary_recall_at_fixed_precision_compute,
_multiclass_recall_at_fixed_precision_arg_compute,
_multiclass_recall_at_fixed_precision_arg_validation,
_multilabel_recall_at_fixed_precision_arg_compute,
_multilabel_recall_at_fixed_precision_arg_validation,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinaryRecallAtFixedPrecision.plot",
"MulticlassRecallAtFixedPrecision.plot",
"MultilabelRecallAtFixedPrecision.plot",
]
class BinaryRecallAtFixedPrecision(BinaryPrecisionRecallCurve):
r"""Compute the highest possible recall value given the minimum precision thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and then finding the recall
for a given precision level.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``. Preds should be a tensor containing
probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input
to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). The value
1 always encodes the positive class.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``recall`` (:class:`~torch.Tensor`): A scalar tensor with the maximum recall for the given precision level
- ``threshold`` (:class:`~torch.Tensor`): A scalar tensor with the corresponding threshold level
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a
binned version that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None``
will activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting
the `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory
of size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
min_precision: float value specifying minimum precision threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryRecallAtFixedPrecision
>>> preds = tensor([0, 0.5, 0.7, 0.8])
>>> target = tensor([0, 1, 1, 0])
>>> metric = BinaryRecallAtFixedPrecision(min_precision=0.5, thresholds=None)
>>> metric(preds, target)
(tensor(1.), tensor(0.5000))
>>> metric = BinaryRecallAtFixedPrecision(min_precision=0.5, thresholds=5)
>>> metric(preds, target)
(tensor(1.), tensor(0.5000))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(thresholds, ignore_index, validate_args=False, **kwargs)
if validate_args:
_binary_recall_at_fixed_precision_arg_validation(min_precision, thresholds, ignore_index)
self.validate_args = validate_args
self.min_precision = min_precision
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _binary_recall_at_fixed_precision_compute(state, self.thresholds, self.min_precision)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryRecallAtFixedPrecision
>>> metric = BinaryRecallAtFixedPrecision(min_precision=0.5)
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot() # the returned plot only shows the maximum recall value by default
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryRecallAtFixedPrecision
>>> metric = BinaryRecallAtFixedPrecision(min_precision=0.5)
>>> values = [ ]
>>> for _ in range(10):
... # we index by 0 such that only the maximum recall value is plotted
... values.append(metric(rand(10), randint(2,(10,)))[0])
>>> fig_, ax_ = metric.plot(values)
"""
val = val if val is not None else self.compute()[0]  # by default we select the maximum recall value to plot
return self._plot(val, ax)
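# Illustrative sketch (assumption, not part of the library API): the metric above should agree with reading
# the best recall off the precision-recall curve subject to the precision floor. The helper name below is
# hypothetical.
def _example_binary_recall_at_fixed_precision_by_hand() -> None:
    """Sketch: max recall among PR-curve points whose precision meets ``min_precision``."""
    import torch

    from torchmetrics.functional.classification import binary_precision_recall_curve

    preds = torch.tensor([0.0, 0.5, 0.7, 0.8])
    target = torch.tensor([0, 1, 1, 0])
    precision, recall, _ = binary_precision_recall_curve(preds, target)
    keep = precision[:-1] >= 0.5  # drop the artificial final (precision=1, recall=0) point
    manual_recall = recall[:-1][keep].max()
    metric_recall, _threshold = BinaryRecallAtFixedPrecision(min_precision=0.5)(preds, target)
    assert torch.isclose(manual_recall, metric_recall)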
class MulticlassRecallAtFixedPrecision(MulticlassPrecisionRecallCurve):
r"""Compute the highest possible recall value given the minimum precision thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and then finding the recall
for a given precision level.
For multiclass the metric is calculated by iteratively treating each class as the positive class and all other
classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by
this metric.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply softmax per sample.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain values in the [0, n_classes-1] range (except if `ignore_index`
is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns a tuple of either 2 tensors or 2 lists containing:
- ``recall`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_classes, )`` with the maximum recall for the
given precision level per class
- ``threshold`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_classes, )`` with the corresponding threshold
level per class
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
num_classes: Integer specifying the number of classes
min_precision: float value specifying minimum precision threshold.
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassRecallAtFixedPrecision
>>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = tensor([0, 1, 3, 2])
>>> metric = MulticlassRecallAtFixedPrecision(num_classes=5, min_precision=0.5, thresholds=None)
>>> metric(preds, target)
(tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06]))
>>> mcrafp = MulticlassRecallAtFixedPrecision(num_classes=5, min_precision=0.5, thresholds=5)
>>> mcrafp(preds, target)
(tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_classes=num_classes, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multiclass_recall_at_fixed_precision_arg_validation(num_classes, min_precision, thresholds, ignore_index)
self.validate_args = validate_args
self.min_precision = min_precision
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multiclass_recall_at_fixed_precision_arg_compute(
state, self.num_classes, self.thresholds, self.min_precision
)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value per class
>>> from torchmetrics.classification import MulticlassRecallAtFixedPrecision
>>> metric = MulticlassRecallAtFixedPrecision(num_classes=3, min_precision=0.5)
>>> metric.update(rand(20, 3).softmax(dim=-1), randint(3, (20,)))
>>> fig_, ax_ = metric.plot() # the returned plot only shows the maximum recall value by default
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a multiple values per class
>>> from torchmetrics.classification import MulticlassRecallAtFixedPrecision
>>> metric = MulticlassRecallAtFixedPrecision(num_classes=3, min_precision=0.5)
>>> values = []
>>> for _ in range(20):
... # we index by 0 such that only the maximum recall value is plotted
... values.append(metric(rand(20, 3).softmax(dim=-1), randint(3, (20,)))[0])
>>> fig_, ax_ = metric.plot(values)
"""
val = val if val is not None else self.compute()[0]  # by default we select the maximum recall value to plot
return self._plot(val, ax)
class MultilabelRecallAtFixedPrecision(MultilabelPrecisionRecallCurve):
r"""Compute the highest possible recall value given the minimum precision thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and then finding the recall
for a given precision level.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). The value
1 always encodes the positive class.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns a tuple of either 2 tensors or 2 lists containing:
- ``recall`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_labels, )`` with the maximum recall for the
given precision level per label
- ``threshold`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_labels, )`` with the corresponding threshold
level per label
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate
the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
`thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
num_labels: Integer specifying the number of labels
min_precision: float value specifying minimum precision threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelRecallAtFixedPrecision
>>> preds = tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> metric = MultilabelRecallAtFixedPrecision(num_labels=3, min_precision=0.5, thresholds=None)
>>> metric(preds, target)
(tensor([1., 1., 1.]), tensor([0.0500, 0.5500, 0.0500]))
>>> mlrafp = MultilabelRecallAtFixedPrecision(num_labels=3, min_precision=0.5, thresholds=5)
>>> mlrafp(preds, target)
(tensor([1., 1., 1.]), tensor([0.0000, 0.5000, 0.0000]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_labels=num_labels, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multilabel_recall_at_fixed_precision_arg_validation(num_labels, min_precision, thresholds, ignore_index)
self.validate_args = validate_args
self.min_precision = min_precision
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multilabel_recall_at_fixed_precision_arg_compute(
state, self.num_labels, self.thresholds, self.ignore_index, self.min_precision
)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelRecallAtFixedPrecision
>>> metric = MultilabelRecallAtFixedPrecision(num_labels=3, min_precision=0.5)
>>> metric.update(rand(20, 3), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot() # the returned plot only shows the maximum recall value by default
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelRecallAtFixedPrecision
>>> metric = MultilabelRecallAtFixedPrecision(num_labels=3, min_precision=0.5)
>>> values = [ ]
>>> for _ in range(10):
... # we index by 0 such that only the maximum recall value is plotted
... values.append(metric(rand(20, 3), randint(2, (20, 3)))[0])
>>> fig_, ax_ = metric.plot(values)
"""
val = val if val is not None else self.compute()[0]  # by default we select the maximum recall value to plot
return self._plot(val, ax)
class RecallAtFixedPrecision(_ClassificationTaskWrapper):
r"""Compute the highest possible recall value given the minimum precision thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and then finding the recall
for a given precision level.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryRecallAtFixedPrecision`,
:class:`~torchmetrics.classification.MulticlassRecallAtFixedPrecision` and
:class:`~torchmetrics.classification.MultilabelRecallAtFixedPrecision` for details on how each argument influences
the metric, along with usage examples.
"""
def __new__( # type: ignore[misc]
cls: Type["RecallAtFixedPrecision"],
task: Literal["binary", "multiclass", "multilabel"],
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return BinaryRecallAtFixedPrecision(min_precision, thresholds, ignore_index, validate_args, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return MulticlassRecallAtFixedPrecision(
num_classes, min_precision, thresholds, ignore_index, validate_args, **kwargs
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelRecallAtFixedPrecision(
num_labels, min_precision, thresholds, ignore_index, validate_args, **kwargs
)
raise ValueError(f"Task {task} not supported!")
public_repos/torchmetrics/src/torchmetrics/classification/group_fairness.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.group_fairness import (
_binary_groups_stat_scores,
_compute_binary_demographic_parity,
_compute_binary_equal_opportunity,
)
from torchmetrics.functional.classification.stat_scores import _binary_stat_scores_arg_validation
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinaryFairness.plot"]
class _AbstractGroupStatScores(Metric):
"""Create and update states for computing group stats tp, fp, tn and fn."""
tp: Tensor
fp: Tensor
tn: Tensor
fn: Tensor
def _create_states(self, num_groups: int) -> None:
default = lambda: torch.zeros(num_groups, dtype=torch.long)
self.add_state("tp", default(), dist_reduce_fx="sum")
self.add_state("fp", default(), dist_reduce_fx="sum")
self.add_state("tn", default(), dist_reduce_fx="sum")
self.add_state("fn", default(), dist_reduce_fx="sum")
def _update_states(self, group_stats: List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]) -> None:
for group, stats in enumerate(group_stats):
tp, fp, tn, fn = stats
self.tp[group] += tp
self.fp[group] += fp
self.tn[group] += tn
self.fn[group] += fn
class BinaryGroupStatRates(_AbstractGroupStatScores):
r"""Computes the true/false positives and true/false negatives rates for binary classification by group.
Related to `Type I and Type II errors`_.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``.
- ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
The additional dimensions are flattened along the batch dimension.
Args:
num_groups: The number of groups.
threshold: Threshold for transforming probability to binary {0,1} predictions.
ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Returns:
The metric returns a dict with a group identifier as key and a tensor with the tp, fp, tn and fn rates as value.
Example (preds is int tensor):
>>> from torchmetrics.classification import BinaryGroupStatRates
>>> target = torch.tensor([0, 1, 0, 1, 0, 1])
>>> preds = torch.tensor([0, 1, 0, 1, 0, 1])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> metric = BinaryGroupStatRates(num_groups=2)
>>> metric(preds, target, groups)
{'group_0': tensor([0., 0., 1., 0.]), 'group_1': tensor([1., 0., 0., 0.])}
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryGroupStatRates
>>> target = torch.tensor([0, 1, 0, 1, 0, 1])
>>> preds = torch.tensor([0.11, 0.84, 0.22, 0.73, 0.33, 0.92])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> metric = BinaryGroupStatRates(num_groups=2)
>>> metric(preds, target, groups)
{'group_0': tensor([0., 0., 1., 0.]), 'group_1': tensor([1., 0., 0., 0.])}
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
num_groups: int,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__()
if validate_args:
_binary_stat_scores_arg_validation(threshold, "global", ignore_index)
if not isinstance(num_groups, int) or num_groups < 2:
raise ValueError(f"Expected argument `num_groups` to be an int larger than 1, but got {num_groups}")
self.num_groups = num_groups
self.threshold = threshold
self.ignore_index = ignore_index
self.validate_args = validate_args
self._create_states(self.num_groups)
def update(self, preds: Tensor, target: Tensor, groups: Tensor) -> None:
"""Update state with predictions, target and group identifiers.
Args:
preds: Tensor with predictions.
target: Tensor with true labels.
groups: Tensor with group identifiers. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
"""
group_stats = _binary_groups_stat_scores(
preds, target, groups, self.num_groups, self.threshold, self.ignore_index, self.validate_args
)
self._update_states(group_stats)
def compute(
self,
) -> Dict[str, Tensor]:
"""Compute tp, fp, tn and fn rates based on inputs passed in to ``update`` previously."""
results = torch.stack((self.tp, self.fp, self.tn, self.fn), dim=1)
return {f"group_{i}": group / group.sum() for i, group in enumerate(results)}
class BinaryFairness(_AbstractGroupStatScores):
r"""Computes `Demographic parity`_ and `Equal opportunity`_ ratio for binary classification problems.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
- ``target`` (int tensor): ``(N, ...)``.
The additional dimensions are flattened along the batch dimension.
This class computes the ratio between positivity rates and true positive rates for different groups.
If more than two groups are present, the disparity between the lowest and highest group is reported.
A disparity between positivity rates indicates a potential violation of demographic parity, and a disparity
between true positive rates indicates a potential violation of equal opportunity.
The lowest rate is divided by the highest, so a lower value means more discrimination against the group in
the numerator. The keys of the returned dict reflect this as {metric}_{identifier_low_group}_{identifier_high_group}.
Args:
num_groups: The number of groups.
task: The task to compute. Can be either ``demographic_parity``, ``equal_opportunity`` or ``all``.
threshold: Threshold for transforming probability to binary {0,1} predictions.
ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Returns:
The metric returns a dict where the key identifies the metric and the groups with the lowest and highest true
positive rates as follows: {metric}_{identifier_low_group}_{identifier_high_group}.
The value is a tensor with the disparity rate.
Example (preds is int tensor):
>>> from torchmetrics.classification import BinaryFairness
>>> target = torch.tensor([0, 1, 0, 1, 0, 1])
>>> preds = torch.tensor([0, 1, 0, 1, 0, 1])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> metric = BinaryFairness(2)
>>> metric(preds, target, groups)
{'DP_0_1': tensor(0.), 'EO_0_1': tensor(0.)}
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryFairness
>>> target = torch.tensor([0, 1, 0, 1, 0, 1])
>>> preds = torch.tensor([0.11, 0.84, 0.22, 0.73, 0.33, 0.92])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> metric = BinaryFairness(2)
>>> metric(preds, target, groups)
{'DP_0_1': tensor(0.), 'EO_0_1': tensor(0.)}
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
num_groups: int,
task: Literal["demographic_parity", "equal_opportunity", "all"] = "all",
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__()
if task not in ["demographic_parity", "equal_opportunity", "all"]:
raise ValueError(
f"Expected argument `task` to either be ``demographic_parity``,"
f"``equal_opportunity`` or ``all`` but got {task}."
)
if validate_args:
_binary_stat_scores_arg_validation(threshold, "global", ignore_index)
if not isinstance(num_groups, int) or num_groups < 2:
raise ValueError(f"Expected argument `num_groups` to be an int larger than 1, but got {num_groups}")
self.num_groups = num_groups
self.task = task
self.threshold = threshold
self.ignore_index = ignore_index
self.validate_args = validate_args
self._create_states(self.num_groups)
def update(self, preds: Tensor, target: Tensor, groups: Tensor) -> None:
"""Update state with predictions, groups, and target.
Args:
preds: Tensor with predictions.
target: Tensor with true labels.
groups: Tensor with group identifiers. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
"""
if self.task == "demographic_parity":
if target is not None:
rank_zero_warn("The task demographic_parity does not require a target.", UserWarning)
target = torch.zeros(preds.shape)
group_stats = _binary_groups_stat_scores(
preds, target, groups, self.num_groups, self.threshold, self.ignore_index, self.validate_args
)
self._update_states(group_stats)
def compute(
self,
) -> Dict[str, torch.Tensor]:
"""Compute fairness criteria based on inputs passed in to ``update`` previously."""
if self.task == "demographic_parity":
return _compute_binary_demographic_parity(self.tp, self.fp, self.tn, self.fn)
if self.task == "equal_opportunity":
return _compute_binary_equal_opportunity(self.tp, self.fp, self.tn, self.fn)
if self.task == "all":
return {
**_compute_binary_demographic_parity(self.tp, self.fp, self.tn, self.fn),
**_compute_binary_equal_opportunity(self.tp, self.fp, self.tn, self.fn),
}
return None
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> _ = torch.manual_seed(42)
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryFairness
>>> metric = BinaryFairness(2)
>>> metric.update(torch.rand(20), torch.randint(2,(20,)), torch.randint(2,(20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> _ = torch.manual_seed(42)
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryFairness
>>> metric = BinaryFairness(2)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(20), torch.randint(2,(20,)), torch.ones(20).long()))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
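# The block below is a minimal, illustrative usage sketch added for documentation purposes only
# (it is not part of the library API and runs only when this file is executed directly).
# It shows how the per-group [tp, fp, tn, fn] rates from ``BinaryGroupStatRates`` relate to the
# demographic parity / equal opportunity ratios returned by ``BinaryFairness``.
# The input tensors are made up solely for demonstration.
if __name__ == "__main__":
    import torch

    example_preds = torch.tensor([0.11, 0.84, 0.22, 0.73, 0.33, 0.92])
    example_target = torch.tensor([0, 1, 0, 1, 0, 1])
    example_groups = torch.tensor([0, 1, 0, 1, 0, 1])

    # Per-group rates: each value is a tensor of [tp, fp, tn, fn] rates for that group.
    group_rates = BinaryGroupStatRates(num_groups=2)
    print(group_rates(example_preds, example_target, example_groups))

    # Disparity ratios between the lowest- and highest-rate groups (a lower value means a larger disparity).
    fairness = BinaryFairness(num_groups=2, task="all")
    print(fairness(example_preds, example_target, example_groups))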
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/classification/accuracy.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
from torchmetrics.functional.classification.accuracy import _accuracy_reduce
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinaryAccuracy.plot", "MulticlassAccuracy.plot", "MultilabelAccuracy.plot"]
class BinaryAccuracy(BinaryStatScores):
r"""Compute `Accuracy`_ for binary tasks.
.. math::
\text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating
point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid
per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``acc`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, metric returns a scalar value.
If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar
value per sample.
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryAccuracy
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> metric = BinaryAccuracy()
>>> metric(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryAccuracy
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> metric = BinaryAccuracy()
>>> metric(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.classification import BinaryAccuracy
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = BinaryAccuracy(multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.3333, 0.1667])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def compute(self) -> Tensor:
"""Compute accuracy based on inputs passed in to ``update`` previously."""
tp, fp, tn, fn = self._final_state()
return _accuracy_reduce(tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryAccuracy
>>> metric = BinaryAccuracy()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryAccuracy
>>> metric = BinaryAccuracy()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MulticlassAccuracy(MulticlassStatScores):
r"""Compute `Accuracy`_ for multiclass tasks.
.. math::
\text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor
of shape ``(N, C, ...)``. If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension
to automatically convert probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mca`` (:class:`~torch.Tensor`): A tensor with the accuracy score whose returned shape depends on the
``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassAccuracy
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassAccuracy(num_classes=3)
>>> metric(preds, target)
tensor(0.8333)
>>> mca = MulticlassAccuracy(num_classes=3, average=None)
>>> mca(preds, target)
tensor([0.5000, 1.0000, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MulticlassAccuracy
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassAccuracy(num_classes=3)
>>> metric(preds, target)
tensor(0.8333)
>>> mca = MulticlassAccuracy(num_classes=3, average=None)
>>> mca(preds, target)
tensor([0.5000, 1.0000, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.classification import MulticlassAccuracy
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassAccuracy(num_classes=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.5000, 0.2778])
>>> mca = MulticlassAccuracy(num_classes=3, multidim_average='samplewise', average=None)
>>> mca(preds, target)
tensor([[1.0000, 0.0000, 0.5000],
[0.0000, 0.3333, 0.5000]])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def compute(self) -> Tensor:
"""Compute accuracy based on inputs passed in to ``update`` previously."""
tp, fp, tn, fn = self._final_state()
return _accuracy_reduce(tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a single value per class
>>> from torchmetrics.classification import MulticlassAccuracy
>>> metric = MulticlassAccuracy(num_classes=3, average=None)
>>> metric.update(randint(3, (20,)), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting multiple values per class
>>> from torchmetrics.classification import MulticlassAccuracy
>>> metric = MulticlassAccuracy(num_classes=3, average=None)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,)), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelAccuracy(MultilabelStatScores):
r"""Compute `Accuracy`_ for multilabel tasks.
.. math::
\text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mla`` (:class:`~torch.Tensor`): A tensor with the accuracy score whose returned shape depends on the
``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary {0,1} predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelAccuracy
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelAccuracy(num_labels=3)
>>> metric(preds, target)
tensor(0.6667)
>>> mla = MultilabelAccuracy(num_labels=3, average=None)
>>> mla(preds, target)
tensor([1.0000, 0.5000, 0.5000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelAccuracy
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelAccuracy(num_labels=3)
>>> metric(preds, target)
tensor(0.6667)
>>> mla = MultilabelAccuracy(num_labels=3, average=None)
>>> mla(preds, target)
tensor([1.0000, 0.5000, 0.5000])
Example (multidim tensors):
>>> from torchmetrics.classification import MultilabelAccuracy
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor(
... [
... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
... ]
... )
>>> mla = MultilabelAccuracy(num_labels=3, multidim_average='samplewise')
>>> mla(preds, target)
tensor([0.3333, 0.1667])
>>> mla = MultilabelAccuracy(num_labels=3, multidim_average='samplewise', average=None)
>>> mla(preds, target)
tensor([[0.5000, 0.5000, 0.0000],
[0.0000, 0.0000, 0.5000]])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def compute(self) -> Tensor:
"""Compute accuracy based on inputs passed in to ``update`` previously."""
tp, fp, tn, fn = self._final_state()
return _accuracy_reduce(
tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, multilabel=True
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelAccuracy
>>> metric = MultilabelAccuracy(num_labels=3)
>>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelAccuracy
>>> metric = MultilabelAccuracy(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class Accuracy(_ClassificationTaskWrapper):
r"""Compute `Accuracy`_.
.. math::
\text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
This class is a simple wrapper to get the task-specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryAccuracy`, :class:`~torchmetrics.classification.MulticlassAccuracy` and
:class:`~torchmetrics.classification.MultilabelAccuracy` for the specific details of each argument's influence and
examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([0, 1, 2, 3])
>>> preds = tensor([0, 2, 1, 3])
>>> accuracy = Accuracy(task="multiclass", num_classes=4)
>>> accuracy(preds, target)
tensor(0.5000)
>>> target = tensor([0, 1, 2])
>>> preds = tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
>>> accuracy = Accuracy(task="multiclass", num_classes=3, top_k=2)
>>> accuracy(preds, target)
tensor(0.6667)
"""
def __new__( # type: ignore[misc]
cls: Type["Accuracy"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Literal["global", "samplewise"] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
if task == ClassificationTask.BINARY:
return BinaryAccuracy(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(
f"Optional arg `num_classes` must be type `int` when task is {task}. Got {type(num_classes)}"
)
if not isinstance(top_k, int):
raise ValueError(f"Optional arg `top_k` must be type `int` when task is {task}. Got {type(top_k)}")
return MulticlassAccuracy(num_classes, top_k, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(
f"Optional arg `num_labels` must be type `int` when task is {task}. Got {type(num_labels)}"
)
return MultilabelAccuracy(num_labels, threshold, average, **kwargs)
raise ValueError(f"Not handled value: {task}")
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/classification/ranking.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.functional.classification.ranking import (
_multilabel_confusion_matrix_arg_validation,
_multilabel_confusion_matrix_format,
_multilabel_coverage_error_update,
_multilabel_ranking_average_precision_update,
_multilabel_ranking_loss_update,
_multilabel_ranking_tensor_validation,
_ranking_reduce,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"MultilabelCoverageError.plot",
"MultilabelRankingAveragePrecision.plot",
"MultilabelRankingLoss.plot",
]
class MultilabelCoverageError(Metric):
"""Compute `Multilabel coverage error`_.
The score measures how far we need to go through the ranked scores to cover all true labels. The best value is equal
to the average number of labels in the target tensor per sample.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor
containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlce`` (:class:`~torch.Tensor`): A tensor containing the multilabel coverage error.
Args:
num_labels: Integer specifying the number of labels
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torchmetrics.classification import MultilabelCoverageError
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand(10, 5)
>>> target = torch.randint(2, (10, 5))
>>> mlce = MultilabelCoverageError(num_labels=5)
>>> mlce(preds, target)
tensor(3.9000)
"""
higher_is_better: bool = False
is_differentiable: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index)
self.validate_args = validate_args
self.num_labels = num_labels
self.ignore_index = ignore_index
self.add_state("measure", torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states."""
if self.validate_args:
_multilabel_ranking_tensor_validation(preds, target, self.num_labels, self.ignore_index)
preds, target = _multilabel_confusion_matrix_format(
preds, target, self.num_labels, threshold=0.0, ignore_index=self.ignore_index, should_threshold=False
)
measure, num_elements = _multilabel_coverage_error_update(preds, target)
self.measure += measure
self.total += num_elements
def compute(self) -> Tensor:
"""Compute metric."""
return _ranking_reduce(self.measure, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelCoverageError
>>> metric = MultilabelCoverageError(num_labels=3)
>>> metric.update(rand(20, 3), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelCoverageError
>>> metric = MultilabelCoverageError(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(20, 3), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelRankingAveragePrecision(Metric):
"""Compute label ranking average precision score for multilabel data [1].
The score is the average over each ground truth label assigned to each sample of the ratio of true vs. total labels
with lower score. Best score is 1.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor
containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlrap`` (:class:`~torch.Tensor`): A tensor containing the multilabel ranking average precision.
Args:
num_labels: Integer specifying the number of labels
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torchmetrics.classification import MultilabelRankingAveragePrecision
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand(10, 5)
>>> target = torch.randint(2, (10, 5))
>>> mlrap = MultilabelRankingAveragePrecision(num_labels=5)
>>> mlrap(preds, target)
tensor(0.7744)
"""
higher_is_better: bool = True
is_differentiable: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index)
self.validate_args = validate_args
self.num_labels = num_labels
self.ignore_index = ignore_index
self.add_state("measure", torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states."""
if self.validate_args:
_multilabel_ranking_tensor_validation(preds, target, self.num_labels, self.ignore_index)
preds, target = _multilabel_confusion_matrix_format(
preds, target, self.num_labels, threshold=0.0, ignore_index=self.ignore_index, should_threshold=False
)
measure, num_elements = _multilabel_ranking_average_precision_update(preds, target)
self.measure += measure
self.total += num_elements
def compute(self) -> Tensor:
"""Compute metric."""
return _ranking_reduce(self.measure, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelRankingAveragePrecision
>>> metric = MultilabelRankingAveragePrecision(num_labels=3)
>>> metric.update(rand(20, 3), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelRankingAveragePrecision
>>> metric = MultilabelRankingAveragePrecision(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(20, 3), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelRankingLoss(Metric):
"""Compute the label ranking loss for multilabel data [1].
The score corresponds to the average number of label pairs that are incorrectly ordered given some predictions,
weighted by the size of the label set and the number of labels not in the label set. The best score is 0.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor
containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlrl`` (:class:`~torch.Tensor`): A tensor containing the multilabel ranking loss.
Args:
num_labels: Integer specifying the number of labels
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torchmetrics.classification import MultilabelRankingLoss
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand(10, 5)
>>> target = torch.randint(2, (10, 5))
>>> mlrl = MultilabelRankingLoss(num_labels=5)
>>> mlrl(preds, target)
tensor(0.4167)
"""
higher_is_better: bool = False
is_differentiable: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index)
self.validate_args = validate_args
self.num_labels = num_labels
self.ignore_index = ignore_index
self.add_state("measure", torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states."""
if self.validate_args:
_multilabel_ranking_tensor_validation(preds, target, self.num_labels, self.ignore_index)
preds, target = _multilabel_confusion_matrix_format(
preds, target, self.num_labels, threshold=0.0, ignore_index=self.ignore_index, should_threshold=False
)
measure, num_elements = _multilabel_ranking_loss_update(preds, target)
self.measure += measure
self.total += num_elements
def compute(self) -> Tensor:
"""Compute metric."""
return _ranking_reduce(self.measure, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelRankingLoss
>>> metric = MultilabelRankingLoss(num_labels=3)
>>> metric.update(rand(20, 3), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelRankingLoss
>>> metric = MultilabelRankingLoss(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(20, 3), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
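# The block below is a minimal, illustrative usage sketch added for documentation purposes only
# (not part of the library API; it runs only when this file is executed directly). It evaluates the
# three ranking metrics defined above on the same random scores and labels. Recall that the best
# coverage error equals the average number of positive labels per sample, the best ranking loss is 0,
# and the best ranking average precision is 1. Inputs are randomly generated for demonstration only.
if __name__ == "__main__":
    _ = torch.manual_seed(42)
    example_preds = torch.rand(10, 5)            # scores/probabilities per label
    example_target = torch.randint(2, (10, 5))   # binary ground-truth labels

    print("coverage error:", MultilabelCoverageError(num_labels=5)(example_preds, example_target))
    print("ranking avg precision:", MultilabelRankingAveragePrecision(num_labels=5)(example_preds, example_target))
    print("ranking loss:", MultilabelRankingLoss(num_labels=5)(example_preds, example_target))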
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/classification/specificity.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
from torchmetrics.functional.classification.specificity import _specificity_reduce
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinarySpecificity.plot", "MulticlassSpecificity.plot", "MultilabelSpecificity.plot"]
class BinarySpecificity(BinaryStatScores):
r"""Compute `Specificity`_ for binary tasks.
.. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and false positives
respectively. The metric is only properly defined when :math:`\text{TN} + \text{FP} \neq 0`. If this case is
encountered, a score of 0 is returned.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bs`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, the metric returns a scalar value.
If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value
per sample.
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinarySpecificity
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> metric = BinarySpecificity()
>>> metric(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinarySpecificity
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> metric = BinarySpecificity()
>>> metric(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.classification import BinarySpecificity
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = BinarySpecificity(multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.0000, 0.3333])
"""
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _specificity_reduce(tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinarySpecificity
>>> metric = BinarySpecificity()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinarySpecificity
>>> metric = BinarySpecificity()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MulticlassSpecificity(MulticlassStatScores):
r"""Compute `Specificity`_ for multiclass tasks.
.. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and false positives
respectively. The metric is only properly defined when :math:`\text{TN} + \text{FP} \neq 0`. If this case is
encountered for any class, the metric for that class will be set to 0 and the overall metric may therefore be
affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ...)``.
If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcs`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassSpecificity
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassSpecificity(num_classes=3)
>>> metric(preds, target)
tensor(0.8889)
>>> mcs = MulticlassSpecificity(num_classes=3, average=None)
>>> mcs(preds, target)
tensor([1.0000, 0.6667, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MulticlassSpecificity
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassSpecificity(num_classes=3)
>>> metric(preds, target)
tensor(0.8889)
>>> mcs = MulticlassSpecificity(num_classes=3, average=None)
>>> mcs(preds, target)
tensor([1.0000, 0.6667, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.classification import MulticlassSpecificity
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassSpecificity(num_classes=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.7500, 0.6556])
>>> mcs = MulticlassSpecificity(num_classes=3, multidim_average='samplewise', average=None)
>>> mcs(preds, target)
tensor([[0.7500, 0.7500, 0.7500],
[0.8000, 0.6667, 0.5000]])
"""
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _specificity_reduce(tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a single value per class
>>> from torchmetrics.classification import MulticlassSpecificity
>>> metric = MulticlassSpecificity(num_classes=3, average=None)
>>> metric.update(randint(3, (20,)), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting multiple values per class
>>> from torchmetrics.classification import MulticlassSpecificity
>>> metric = MulticlassSpecificity(num_classes=3, average=None)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,)), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelSpecificity(MultilabelStatScores):
r"""Compute `Specificity`_ for multilabel tasks.
.. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and false positives
respectively. The metric is only properly defined when :math:`\text{TN} + \text{FP} \neq 0`. If this case is
encountered for any label, the metric for that label will be set to 0 and the overall metric may therefore be
affected in turn.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid
per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mls`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
arguments:
- If ``multidim_average`` is set to ``global``
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary {0,1} predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average: Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelSpecificity
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelSpecificity(num_labels=3)
>>> metric(preds, target)
tensor(0.6667)
>>> mls = MultilabelSpecificity(num_labels=3, average=None)
>>> mls(preds, target)
tensor([1., 1., 0.])
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelSpecificity
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelSpecificity(num_labels=3)
>>> metric(preds, target)
tensor(0.6667)
>>> mls = MultilabelSpecificity(num_labels=3, average=None)
>>> mls(preds, target)
tensor([1., 1., 0.])
Example (multidim tensors):
>>> from torchmetrics.classification import MultilabelSpecificity
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = MultilabelSpecificity(num_labels=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.0000, 0.3333])
>>> mls = MultilabelSpecificity(num_labels=3, multidim_average='samplewise', average=None)
>>> mls(preds, target)
tensor([[0., 0., 0.],
[0., 0., 1.]])
"""
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _specificity_reduce(
tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, multilabel=True
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelSpecificity
>>> metric = MultilabelSpecificity(num_labels=3)
>>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelSpecificity
>>> metric = MultilabelSpecificity(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class Specificity(_ClassificationTaskWrapper):
r"""Compute `Specificity`_.
.. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and false positives
respectively. The metric is only properly defined when :math:`\text{TN} + \text{FP} \neq 0`. If this case is
encountered for any class/label, the metric for that class/label will be set to 0 and the overall metric may
therefore be affected in turn.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinarySpecificity`, :class:`~torchmetrics.classification.MulticlassSpecificity`
and :class:`~torchmetrics.classification.MultilabelSpecificity` for the specific details of each argument influence
and examples.
Legacy Example:
>>> from torch import tensor
>>> preds = tensor([2, 0, 2, 1])
>>> target = tensor([1, 1, 2, 0])
>>> specificity = Specificity(task="multiclass", average='macro', num_classes=3)
>>> specificity(preds, target)
tensor(0.6111)
>>> specificity = Specificity(task="multiclass", average='micro', num_classes=3)
>>> specificity(preds, target)
tensor(0.6250)
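A binary task works the same way (illustrative sketch; the result is assigned to a variable rather than shown):
>>> binary_specificity = Specificity(task="binary", threshold=0.5)
>>> score = binary_specificity(tensor([0.2, 0.7, 0.6, 0.1]), tensor([0, 1, 1, 0]))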
"""
def __new__( # type: ignore[misc]
cls: Type["Specificity"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
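# arguments shared by all task versions are forwarded via ``kwargs``; task-specific arguments are passed
# positionally to each metric below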
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
if task == ClassificationTask.BINARY:
return BinarySpecificity(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return MulticlassSpecificity(num_classes, top_k, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelSpecificity(num_labels, threshold, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
public_repos/torchmetrics/src/torchmetrics/classification/precision_fixed_recall.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.precision_recall_curve import (
BinaryPrecisionRecallCurve,
MulticlassPrecisionRecallCurve,
MultilabelPrecisionRecallCurve,
)
from torchmetrics.functional.classification.precision_fixed_recall import _precision_at_recall
from torchmetrics.functional.classification.recall_fixed_precision import (
_binary_recall_at_fixed_precision_arg_validation,
_binary_recall_at_fixed_precision_compute,
_multiclass_recall_at_fixed_precision_arg_compute,
_multiclass_recall_at_fixed_precision_arg_validation,
_multilabel_recall_at_fixed_precision_arg_compute,
_multilabel_recall_at_fixed_precision_arg_validation,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinaryPrecisionAtFixedRecall.plot",
"MulticlassPrecisionAtFixedRecall.plot",
"MultilabelPrecisionAtFixedRecall.plot",
]
class BinaryPrecisionAtFixedRecall(BinaryPrecisionRecallCurve):
r"""Compute the highest possible precision value given the minimum recall thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and then finding the precision
for a given recall level.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``. Preds should be a tensor containing
probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input
to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). The value
1 always encodes the positive class.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``precision`` (:class:`~torch.Tensor`): A scalar tensor with the maximum precision for the given recall level
- ``threshold`` (:class:`~torch.Tensor`): A scalar tensor with the corresponding threshold level
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a
binned version that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None``
will activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting
the `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory
of size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
min_recall: float value specifying minimum recall threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryPrecisionAtFixedRecall
>>> preds = tensor([0, 0.5, 0.7, 0.8])
>>> target = tensor([0, 1, 1, 0])
>>> metric = BinaryPrecisionAtFixedRecall(min_recall=0.5, thresholds=None)
>>> metric(preds, target)
(tensor(0.6667), tensor(0.5000))
>>> metric = BinaryPrecisionAtFixedRecall(min_recall=0.5, thresholds=5)
>>> metric(preds, target)
(tensor(0.6667), tensor(0.5000))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
min_recall: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(thresholds, ignore_index, validate_args=False, **kwargs)
if validate_args:
_binary_recall_at_fixed_precision_arg_validation(min_recall, thresholds, ignore_index)
self.validate_args = validate_args
self.min_recall = min_recall
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _binary_recall_at_fixed_precision_compute(
state, self.thresholds, self.min_recall, reduce_fn=_precision_at_recall
)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryPrecisionAtFixedRecall
>>> metric = BinaryPrecisionAtFixedRecall(min_recall=0.5)
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot() # the returned plot only shows the maximum precision value by default
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryPrecisionAtFixedRecall
>>> metric = BinaryPrecisionAtFixedRecall(min_recall=0.5)
>>> values = [ ]
>>> for _ in range(10):
... # we index by 0 such that only the maximum precision value is plotted
... values.append(metric(rand(10), randint(2,(10,)))[0])
>>> fig_, ax_ = metric.plot(values)
"""
val = val or self.compute()[0] # by default we select the maximum precision value to plot
return self._plot(val, ax)
class MulticlassPrecisionAtFixedRecall(MulticlassPrecisionRecallCurve):
r"""Compute the highest possible precision value given the minimum recall thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and then finding the precision
for a given recall level.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply softmax per sample.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain values in the [0, n_classes-1] range (except if `ignore_index`
is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns a tuple of either 2 tensors or 2 lists containing:
- ``precision`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_classes, )`` with the maximum precision for the
given recall level per class
- ``threshold`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_classes, )`` with the corresponding threshold
level per class
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
num_classes: Integer specifying the number of classes
min_recall: float value specifying minimum recall threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassPrecisionAtFixedRecall
>>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = tensor([0, 1, 3, 2])
>>> metric = MulticlassPrecisionAtFixedRecall(num_classes=5, min_recall=0.5, thresholds=None)
>>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([1.0000, 1.0000, 0.2500, 0.2500, 0.0000]),
tensor([7.5000e-01, 7.5000e-01, 5.0000e-02, 5.0000e-02, 1.0000e+06]))
>>> mcrafp = MulticlassPrecisionAtFixedRecall(num_classes=5, min_recall=0.5, thresholds=5)
>>> mcrafp(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([1.0000, 1.0000, 0.2500, 0.2500, 0.0000]),
tensor([7.5000e-01, 7.5000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+06]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
min_recall: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_classes=num_classes, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multiclass_recall_at_fixed_precision_arg_validation(num_classes, min_recall, thresholds, ignore_index)
self.validate_args = validate_args
self.min_recall = min_recall
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multiclass_recall_at_fixed_precision_arg_compute(
state, self.num_classes, self.thresholds, self.min_recall, reduce_fn=_precision_at_recall
)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value per class
>>> from torchmetrics.classification import MulticlassPrecisionAtFixedRecall
>>> metric = MulticlassPrecisionAtFixedRecall(num_classes=3, min_recall=0.5)
>>> metric.update(rand(20, 3).softmax(dim=-1), randint(3, (20,)))
>>> fig_, ax_ = metric.plot() # the returned plot only shows the maximum precision value by default
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values per class
>>> from torchmetrics.classification import MulticlassPrecisionAtFixedRecall
>>> metric = MulticlassPrecisionAtFixedRecall(num_classes=3, min_recall=0.5)
>>> values = []
>>> for _ in range(20):
... # we index by 0 such that only the maximum precision value is plotted
... values.append(metric(rand(20, 3).softmax(dim=-1), randint(3, (20,)))[0])
>>> fig_, ax_ = metric.plot(values)
"""
val = val or self.compute()[0] # by default we select the maximum precision value to plot
return self._plot(val, ax)
class MultilabelPrecisionAtFixedRecall(MultilabelPrecisionRecallCurve):
r"""Compute the highest possible precision value given the minimum recall thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and then finding the precision
for a given recall level.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). The value
1 always encodes the positive class.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns a tuple of either 2 tensors or 2 lists containing:
- ``precision`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_labels, )`` with the maximum precision for the
given recall level per label
- ``threshold`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_labels, )`` with the corresponding threshold
level per label
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
num_labels: Integer specifying the number of labels
min_recall: float value specifying minimum recall threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelPrecisionAtFixedRecall
>>> preds = tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> metric = MultilabelPrecisionAtFixedRecall(num_labels=3, min_recall=0.5, thresholds=None)
>>> metric(preds, target)
(tensor([1.0000, 0.6667, 1.0000]), tensor([0.7500, 0.5500, 0.3500]))
>>> mlrafp = MultilabelPrecisionAtFixedRecall(num_labels=3, min_recall=0.5, thresholds=5)
>>> mlrafp(preds, target)
(tensor([1.0000, 0.6667, 1.0000]), tensor([0.7500, 0.5000, 0.2500]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
min_recall: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_labels=num_labels, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multilabel_recall_at_fixed_precision_arg_validation(num_labels, min_recall, thresholds, ignore_index)
self.validate_args = validate_args
self.min_recall = min_recall
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multilabel_recall_at_fixed_precision_arg_compute(
state, self.num_labels, self.thresholds, self.ignore_index, self.min_recall, reduce_fn=_precision_at_recall
)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelPrecisionAtFixedRecall
>>> metric = MultilabelPrecisionAtFixedRecall(num_labels=3, min_recall=0.5)
>>> metric.update(rand(20, 3), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot() # the returned plot only shows the maximum precision value by default
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelPrecisionAtFixedRecall
>>> metric = MultilabelPrecisionAtFixedRecall(num_labels=3, min_recall=0.5)
>>> values = [ ]
>>> for _ in range(10):
... # we index by 0 such that only the maximum precision value is plotted
... values.append(metric(rand(20, 3), randint(2, (20, 3)))[0])
>>> fig_, ax_ = metric.plot(values)
"""
val = val or self.compute()[0] # by default we select the maximum precision value to plot
return self._plot(val, ax)
class PrecisionAtFixedRecall(_ClassificationTaskWrapper):
r"""Compute the highest possible recall value given the minimum precision thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and the find the recall for
a given precision level.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
:class:`~torchmetrics.classification.BinaryPrecisionAtFixedRecall`,
:class:`~torchmetrics.classification.MulticlassPrecisionAtFixedRecall` and
:class:`~torchmetrics.classification.MultilabelPrecisionAtFixedRecall` for the specific details of each argument
influence and examples.
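Example (an illustrative sketch; outputs are assigned to variables rather than shown here):
>>> from torch import tensor
>>> preds = tensor([0.1, 0.4, 0.6, 0.8])
>>> target = tensor([0, 0, 1, 1])
>>> metric = PrecisionAtFixedRecall(task="binary", min_recall=0.5)
>>> precision, threshold = metric(preds, target)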
"""
def __new__( # type: ignore[misc]
cls: Type["PrecisionAtFixedRecall"],
task: Literal["binary", "multiclass", "multilabel"],
min_recall: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return BinaryPrecisionAtFixedRecall(min_recall, thresholds, ignore_index, validate_args, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return MulticlassPrecisionAtFixedRecall(
num_classes, min_recall, thresholds, ignore_index, validate_args, **kwargs
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelPrecisionAtFixedRecall(
num_labels, min_recall, thresholds, ignore_index, validate_args, **kwargs
)
raise ValueError(f"Task {task} not supported!")
public_repos/torchmetrics/src/torchmetrics/classification/precision_recall_curve.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Tuple, Type, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.functional.classification.auroc import _reduce_auroc
from torchmetrics.functional.classification.precision_recall_curve import (
_adjust_threshold_arg,
_binary_precision_recall_curve_arg_validation,
_binary_precision_recall_curve_compute,
_binary_precision_recall_curve_format,
_binary_precision_recall_curve_tensor_validation,
_binary_precision_recall_curve_update,
_multiclass_precision_recall_curve_arg_validation,
_multiclass_precision_recall_curve_compute,
_multiclass_precision_recall_curve_format,
_multiclass_precision_recall_curve_tensor_validation,
_multiclass_precision_recall_curve_update,
_multilabel_precision_recall_curve_arg_validation,
_multilabel_precision_recall_curve_compute,
_multilabel_precision_recall_curve_format,
_multilabel_precision_recall_curve_tensor_validation,
_multilabel_precision_recall_curve_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.compute import _auc_compute_without_check
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE, plot_curve
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinaryPrecisionRecallCurve.plot",
"MulticlassPrecisionRecallCurve.plot",
"MultilabelPrecisionRecallCurve.plot",
]
class BinaryPrecisionRecallCurve(Metric):
r"""Compute the precision-recall curve for binary tasks.
The curve consists of multiple pairs of precision and recall values evaluated at different thresholds, such that the
tradeoff between the two values can be seen.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``. Preds should be a tensor containing
probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input
to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). The value
1 always encodes the positive class.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``precision`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds+1, )`` with precision values
- ``recall`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds+1, )`` with recall values
- ``thresholds`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds, )`` with increasing threshold values
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.classification import BinaryPrecisionRecallCurve
>>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
>>> target = torch.tensor([0, 1, 1, 0])
>>> bprc = BinaryPrecisionRecallCurve(thresholds=None)
>>> bprc(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([0.5000, 0.6667, 0.5000, 0.0000, 1.0000]),
tensor([1.0000, 1.0000, 0.5000, 0.0000, 0.0000]),
tensor([0.0000, 0.5000, 0.7000, 0.8000]))
>>> bprc = BinaryPrecisionRecallCurve(thresholds=5)
>>> bprc(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000]),
tensor([1., 1., 1., 0., 0., 0.]),
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
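An explicit list of thresholds can also be used as bins (illustrative sketch; outputs are assigned to variables
rather than shown here):
>>> bprc = BinaryPrecisionRecallCurve(thresholds=[0.25, 0.5, 0.75])
>>> precision, recall, thresholds = bprc(preds, target)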
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
preds: List[Tensor]
target: List[Tensor]
confmat: Tensor
def __init__(
self,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
self.ignore_index = ignore_index
self.validate_args = validate_args
thresholds = _adjust_threshold_arg(thresholds)
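# two state layouts: with ``thresholds=None`` the raw preds/target are stored in lists (exact but memory hungry),
# otherwise a running per-threshold confusion matrix is kept (binned, constant memory)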
if thresholds is None:
self.thresholds = thresholds
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
else:
self.register_buffer("thresholds", thresholds, persistent=False)
self.add_state(
"confmat", default=torch.zeros(len(thresholds), 2, 2, dtype=torch.long), dist_reduce_fx="sum"
)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states."""
if self.validate_args:
_binary_precision_recall_curve_tensor_validation(preds, target, self.ignore_index)
preds, target, _ = _binary_precision_recall_curve_format(preds, target, self.thresholds, self.ignore_index)
state = _binary_precision_recall_curve_update(preds, target, self.thresholds)
if isinstance(state, Tensor):
self.confmat += state
else:
self.preds.append(state[0])
self.target.append(state[1])
def compute(self) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _binary_precision_recall_curve_compute(state, self.thresholds)
def plot(
self,
curve: Optional[Tuple[Tensor, Tensor, Tensor]] = None,
score: Optional[Union[Tensor, bool]] = None,
ax: Optional[_AX_TYPE] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single curve from the metric.
Args:
curve: the output of either `metric.compute` or `metric.forward`. If no value is provided, will
automatically call `metric.compute` and plot that result.
score: Provide an area-under-the-curve score to be displayed on the plot. If `True` and no curve is provided,
will automatically compute the score.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryPrecisionRecallCurve
>>> preds = rand(20)
>>> target = randint(2, (20,))
>>> metric = BinaryPrecisionRecallCurve()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot(score=True)
"""
curve_computed = curve or self.compute()
# switch order as the standard way is recall along x-axis and precision along y-axis
curve_computed = (curve_computed[1], curve_computed[0], curve_computed[2])
score = (
_auc_compute_without_check(curve_computed[0], curve_computed[1], 1.0)
if not curve and score is True
else None
)
return plot_curve(
curve_computed, score=score, ax=ax, label_names=("Recall", "Precision"), name=self.__class__.__name__
)
class MulticlassPrecisionRecallCurve(Metric):
r"""Compute the precision-recall curve for multiclass tasks.
The curve consists of multiple pairs of precision and recall values evaluated at different thresholds, such that the
tradeoff between the two values can be seen.
For multiclass the metric is calculated by iteratively treating each class as the positive class and all other
classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by
this metric.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor containing
probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input to
be logits and will auto apply softmax per sample.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain values in the [0, n_classes-1] range (except if `ignore_index`
is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``precision`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each class is returned with a 1d
tensor of size ``(n_thresholds+1, )`` with precision values (length may differ between classes). If `thresholds`
is set to something else, then a single 2d tensor of size ``(n_classes, n_thresholds+1)`` with precision values
is returned.
- ``recall`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each class is returned with a 1d tensor
of size ``(n_thresholds+1, )`` with recall values (length may differ between classes). If `thresholds` is set to
something else, then a single 2d tensor of size ``(n_classes, n_thresholds+1)`` with recall values is returned.
- ``thresholds`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each class is returned with a 1d
tensor of size ``(n_thresholds, )`` with increasing threshold values (length may differ between classes). If
`thresholds` is set to something else, then a single 1d tensor of size ``(n_thresholds, )`` is returned with
shared threshold values for all classes.
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
num_classes: Integer specifying the number of classes
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1D `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
average:
If aggregation of curves should be applied. By default, the curves are not aggregated and a curve for
each class is returned. If `average` is set to ``"micro"``, the metric will aggregate the curves by one hot
encoding the targets and flattening the predictions, considering all classes jointly as a binary problem.
If `average` is set to ``"macro"``, the metric will aggregate the curves by first interpolating the curves
from each class at a combined set of thresholds and then average over the classwise interpolated curves.
See `averaging curve objects`_ for more info on the different averaging methods.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.classification import MulticlassPrecisionRecallCurve
>>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> mcprc = MulticlassPrecisionRecallCurve(num_classes=5, thresholds=None)
>>> precision, recall, thresholds = mcprc(preds, target)
>>> precision # doctest: +NORMALIZE_WHITESPACE
[tensor([0.2500, 1.0000, 1.0000]), tensor([0.2500, 1.0000, 1.0000]), tensor([0.2500, 0.0000, 1.0000]),
tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
>>> recall
[tensor([1., 1., 0.]), tensor([1., 1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
>>> thresholds
[tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]),
tensor(0.0500)]
>>> mcprc = MulticlassPrecisionRecallCurve(num_classes=5, thresholds=5)
>>> mcprc(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([[0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
[0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
[0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
[0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
tensor([[1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 0., 0.],
[1., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.]]),
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
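The per-class curves can also be aggregated via the ``average`` argument (illustrative sketch; outputs are
assigned to variables rather than shown here):
>>> mcprc_micro = MulticlassPrecisionRecallCurve(num_classes=5, thresholds=None, average="micro")
>>> precision, recall, thresholds = mcprc_micro(preds, target)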
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
preds: List[Tensor]
target: List[Tensor]
confmat: Tensor
def __init__(
self,
num_classes: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
average: Optional[Literal["micro", "macro"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index, average)
self.num_classes = num_classes
self.average = average
self.ignore_index = ignore_index
self.validate_args = validate_args
thresholds = _adjust_threshold_arg(thresholds)
if thresholds is None:
self.thresholds = thresholds
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
else:
self.register_buffer("thresholds", thresholds, persistent=False)
self.add_state(
"confmat",
default=torch.zeros(len(thresholds), num_classes, 2, 2, dtype=torch.long),
dist_reduce_fx="sum",
)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states."""
if self.validate_args:
_multiclass_precision_recall_curve_tensor_validation(preds, target, self.num_classes, self.ignore_index)
preds, target, _ = _multiclass_precision_recall_curve_format(
preds, target, self.num_classes, self.thresholds, self.ignore_index, self.average
)
state = _multiclass_precision_recall_curve_update(
preds, target, self.num_classes, self.thresholds, self.average
)
if isinstance(state, Tensor):
self.confmat += state
else:
self.preds.append(state[0])
self.target.append(state[1])
def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multiclass_precision_recall_curve_compute(state, self.num_classes, self.thresholds, self.average)
def plot(
self,
curve: Optional[Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]] = None,
score: Optional[Union[Tensor, bool]] = None,
ax: Optional[_AX_TYPE] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
curve: the output of either `metric.compute` or `metric.forward`. If no value is provided, will
automatically call `metric.compute` and plot that result.
score: Provide an area-under-the-curve score to be displayed on the plot. If `True` and no curve is provided,
will automatically compute the score.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn, randint
>>> from torchmetrics.classification import MulticlassPrecisionRecallCurve
>>> preds = randn(20, 3).softmax(dim=-1)
>>> target = randint(3, (20,))
>>> metric = MulticlassPrecisionRecallCurve(num_classes=3)
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot(score=True)
"""
curve_computed = curve or self.compute()
# switch order as the standard way is recall along x-axis and precision along y-axis
curve_computed = (curve_computed[1], curve_computed[0], curve_computed[2])
score = (
_reduce_auroc(curve_computed[0], curve_computed[1], average=None) if not curve and score is True else None
)
return plot_curve(
curve_computed, score=score, ax=ax, label_names=("Recall", "Precision"), name=self.__class__.__name__
)
class MultilabelPrecisionRecallCurve(Metric):
r"""Compute the precision-recall curve for multilabel tasks.
The curve consists of multiple pairs of precision and recall values evaluated at different thresholds, such that the
tradeoff between the two values can be seen.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor containing
probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input to
be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns a tuple of either 3 tensors or
3 lists containing:
- ``precision`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each label is returned
with an 1d tensor of size ``(n_thresholds+1, )`` with precision values (length may differ between labels). If
`thresholds` is set to something else, then a single 2d tensor of size ``(n_labels, n_thresholds+1)`` with
precision values is returned.
- ``recall`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each label is returned
with an 1d tensor of size ``(n_thresholds+1, )`` with recall values (length may differ between labels). If
`thresholds` is set to something else, then a single 2d tensor of size ``(n_labels, n_thresholds+1)`` with recall
values is returned.
- ``thresholds`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each label is
returned with an 1d tensor of size ``(n_thresholds, )`` with increasing threshold values (length may differ
between labels). If `threshold` is set to something else, then a single 1d tensor of size ``(n_thresholds, )``
is returned with shared threshold values for all labels.
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
num_labels: Integer specifying the number of labels
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torchmetrics.classification import MultilabelPrecisionRecallCurve
>>> preds = torch.tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = torch.tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> mlprc = MultilabelPrecisionRecallCurve(num_labels=3, thresholds=None)
>>> precision, recall, thresholds = mlprc(preds, target)
>>> precision # doctest: +NORMALIZE_WHITESPACE
[tensor([0.5000, 0.5000, 1.0000, 1.0000]), tensor([0.5000, 0.6667, 0.5000, 0.0000, 1.0000]),
tensor([0.7500, 1.0000, 1.0000, 1.0000])]
>>> recall # doctest: +NORMALIZE_WHITESPACE
[tensor([1.0000, 0.5000, 0.5000, 0.0000]), tensor([1.0000, 1.0000, 0.5000, 0.0000, 0.0000]),
tensor([1.0000, 0.6667, 0.3333, 0.0000])]
>>> thresholds # doctest: +NORMALIZE_WHITESPACE
[tensor([0.0500, 0.4500, 0.7500]), tensor([0.0500, 0.5500, 0.6500, 0.7500]), tensor([0.0500, 0.3500, 0.7500])]
>>> mlprc = MultilabelPrecisionRecallCurve(num_labels=3, thresholds=5)
>>> mlprc(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([[0.5000, 0.5000, 1.0000, 1.0000, 0.0000, 1.0000],
[0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000],
[0.7500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000]]),
tensor([[1.0000, 0.5000, 0.5000, 0.5000, 0.0000, 0.0000],
[1.0000, 1.0000, 1.0000, 0.0000, 0.0000, 0.0000],
[1.0000, 0.6667, 0.3333, 0.3333, 0.0000, 0.0000]]),
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
preds: List[Tensor]
target: List[Tensor]
confmat: Tensor
def __init__(
self,
num_labels: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
self.num_labels = num_labels
self.ignore_index = ignore_index
self.validate_args = validate_args
thresholds = _adjust_threshold_arg(thresholds)
if thresholds is None:
self.thresholds = thresholds
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
else:
self.register_buffer("thresholds", thresholds, persistent=False)
self.add_state(
"confmat",
default=torch.zeros(len(thresholds), num_labels, 2, 2, dtype=torch.long),
dist_reduce_fx="sum",
)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states."""
if self.validate_args:
_multilabel_precision_recall_curve_tensor_validation(preds, target, self.num_labels, self.ignore_index)
preds, target, _ = _multilabel_precision_recall_curve_format(
preds, target, self.num_labels, self.thresholds, self.ignore_index
)
state = _multilabel_precision_recall_curve_update(preds, target, self.num_labels, self.thresholds)
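# the binned path returns a confusion-matrix tensor to accumulate in place; the non-binned path returns the
# formatted (preds, target) pair to append to the list states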
if isinstance(state, Tensor):
self.confmat += state
else:
self.preds.append(state[0])
self.target.append(state[1])
def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multilabel_precision_recall_curve_compute(state, self.num_labels, self.thresholds, self.ignore_index)
def plot(
self,
curve: Optional[Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]] = None,
score: Optional[Union[Tensor, bool]] = None,
ax: Optional[_AX_TYPE] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
curve: the output of either `metric.compute` or `metric.forward`. If no value is provided, will
automatically call `metric.compute` and plot that result.
score: Provide an area-under-the-curve score to be displayed on the plot. If `True` and no curve is provided,
will automatically compute the score.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelPrecisionRecallCurve
>>> preds = rand(20, 3)
>>> target = randint(2, (20,3))
>>> metric = MultilabelPrecisionRecallCurve(num_labels=3)
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot(score=True)
"""
curve_computed = curve or self.compute()
# switch order as the standard way is recall along x-axis and precision along y-axis
curve_computed = (curve_computed[1], curve_computed[0], curve_computed[2])
score = (
_reduce_auroc(curve_computed[0], curve_computed[1], average=None) if not curve and score is True else None
)
return plot_curve(
curve_computed, score=score, ax=ax, label_names=("Recall", "Precision"), name=self.__class__.__name__
)
class PrecisionRecallCurve(_ClassificationTaskWrapper):
r"""Compute the precision-recall curve.
The curve consists of multiple pairs of precision and recall values evaluated at different thresholds, such that the
tradeoff between the two values can be seen.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryPrecisionRecallCurve`,
:class:`~torchmetrics.classification.MulticlassPrecisionRecallCurve` and
:class:`~torchmetrics.classification.MultilabelPrecisionRecallCurve` for the specific details of each argument
influence and examples.
Legacy Example:
>>> pred = torch.tensor([0, 0.1, 0.8, 0.4])
>>> target = torch.tensor([0, 1, 1, 0])
>>> pr_curve = PrecisionRecallCurve(task="binary")
>>> precision, recall, thresholds = pr_curve(pred, target)
>>> precision
tensor([0.5000, 0.6667, 0.5000, 1.0000, 1.0000])
>>> recall
tensor([1.0000, 1.0000, 0.5000, 0.5000, 0.0000])
>>> thresholds
tensor([0.0000, 0.1000, 0.4000, 0.8000])
>>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> pr_curve = PrecisionRecallCurve(task="multiclass", num_classes=5)
>>> precision, recall, thresholds = pr_curve(pred, target)
>>> precision
[tensor([0.2500, 1.0000, 1.0000]), tensor([0.2500, 1.0000, 1.0000]), tensor([0.2500, 0.0000, 1.0000]),
tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
>>> recall
[tensor([1., 1., 0.]), tensor([1., 1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
>>> thresholds
[tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]),
tensor(0.0500)]
"""
def __new__( # type: ignore[misc]
cls: Type["PrecisionRecallCurve"],
task: Literal["binary", "multiclass", "multilabel"],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
kwargs.update({"thresholds": thresholds, "ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTask.BINARY:
return BinaryPrecisionRecallCurve(**kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return MulticlassPrecisionRecallCurve(num_classes, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelPrecisionRecallCurve(num_labels, **kwargs)
raise ValueError(f"Task {task} not supported!")
public_repos/torchmetrics/src/torchmetrics/classification/auroc.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.precision_recall_curve import (
BinaryPrecisionRecallCurve,
MulticlassPrecisionRecallCurve,
MultilabelPrecisionRecallCurve,
)
from torchmetrics.functional.classification.auroc import (
_binary_auroc_arg_validation,
_binary_auroc_compute,
_multiclass_auroc_arg_validation,
_multiclass_auroc_compute,
_multilabel_auroc_arg_validation,
_multilabel_auroc_compute,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinaryAUROC.plot", "MulticlassAUROC.plot", "MultilabelAUROC.plot"]
class BinaryAUROC(BinaryPrecisionRecallCurve):
r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for binary tasks.
The AUROC score summarizes the ROC curve into a single number that describes the performance of a model for
multiple thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
corresponds to random guessing.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for
each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
therefore only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the
positive class.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``b_auroc`` (:class:`~torch.Tensor`): A single scalar with the auroc score.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation supports calculating the metric both in a non-binned but accurate version and in a
binned version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will
activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
`thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
max_fpr: If not ``None``, calculates standardized partial AUC over the range ``[0, max_fpr]``.
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryAUROC
>>> preds = tensor([0, 0.5, 0.7, 0.8])
>>> target = tensor([0, 1, 1, 0])
>>> metric = BinaryAUROC(thresholds=None)
>>> metric(preds, target)
tensor(0.5000)
>>> b_auroc = BinaryAUROC(thresholds=5)
>>> b_auroc(preds, target)
tensor(0.5000)
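The ``max_fpr`` argument restricts the score to a standardized partial AUC; the sketch below reuses the
``preds`` and ``target`` from above together with a purely illustrative cut-off value:
>>> partial_auroc = BinaryAUROC(max_fpr=0.8)
>>> score = partial_auroc(preds, target)  # scalar tensor, partial AUC standardized over [0, 0.8]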
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
max_fpr: Optional[float] = None,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs)
if validate_args:
_binary_auroc_arg_validation(max_fpr, thresholds, ignore_index)
self.max_fpr = max_fpr
def compute(self) -> Tensor: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _binary_auroc_compute(state, self.thresholds, self.max_fpr)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single
>>> import torch
>>> from torchmetrics.classification import BinaryAUROC
>>> metric = BinaryAUROC()
>>> metric.update(torch.rand(20,), torch.randint(2, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.classification import BinaryAUROC
>>> metric = BinaryAUROC()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(20,), torch.randint(2, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MulticlassAUROC(MulticlassPrecisionRecallCurve):
r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multiclass tasks.
The AUROC score summarizes the ROC curve into a single number that describes the performance of a model for
multiple thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
corresponds to random guessing.
For multiclass the metric is calculated by iteratively treating each class as the positive class and all other
classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by
this metric. By default the reported metric is then the average over all classes, but this behavior can be changed
by setting the ``average`` argument.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
apply softmax per sample.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
therefore only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mc_auroc`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will
be returned with auroc score per class. If `average="macro"|"weighted"` then a single scalar is returned.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation supports calculating the metric both in a non-binned but accurate version and in a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over classes. Should be one of the following:
- ``macro``: Calculate score for each class and average them
- ``weighted``: calculates score for each class and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each class and applies no reduction
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassAUROC
>>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = tensor([0, 1, 3, 2])
>>> metric = MulticlassAUROC(num_classes=5, average="macro", thresholds=None)
>>> metric(preds, target)
tensor(0.5333)
>>> mc_auroc = MulticlassAUROC(num_classes=5, average=None, thresholds=None)
>>> mc_auroc(preds, target)
tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
>>> mc_auroc = MulticlassAUROC(num_classes=5, average="macro", thresholds=5)
>>> mc_auroc(preds, target)
tensor(0.5333)
>>> mc_auroc = MulticlassAUROC(num_classes=5, average=None, thresholds=5)
>>> mc_auroc(preds, target)
tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_classes=num_classes, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multiclass_auroc_arg_validation(num_classes, average, thresholds, ignore_index)
self.average = average # type: ignore[assignment]
self.validate_args = validate_args
def compute(self) -> Tensor: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multiclass_auroc_compute(
state, self.num_classes, self.average, self.thresholds # type: ignore[arg-type]
)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single
>>> import torch
>>> from torchmetrics.classification import MulticlassAUROC
>>> metric = MulticlassAUROC(num_classes=3)
>>> metric.update(torch.randn(20, 3), torch.randint(3,(20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.classification import MulticlassAUROC
>>> metric = MulticlassAUROC(num_classes=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randn(20, 3), torch.randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelAUROC(MultilabelPrecisionRecallCurve):
r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multilabel tasks.
The AUROC score summarizes the ROC curve into a single number that describes the performance of a model for
multiple thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
corresponds to random guessing.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` containing ground truth labels, and
therefore only contain {0,1} values (except if `ignore_index` is specified).
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``ml_auroc`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_labels, ) will
be returned with an auroc score per label. If `average="micro"|"macro"|"weighted"` then a single scalar is returned.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation supports calculating the metric both in a non-binned but accurate version and in a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
num_labels: Integer specifying the number of labels
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum score over all labels
- ``macro``: Calculate score for each label and average them
- ``weighted``: calculates score for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each label and applies no reduction
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelAUROC
>>> preds = tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> ml_auroc = MultilabelAUROC(num_labels=3, average="macro", thresholds=None)
>>> ml_auroc(preds, target)
tensor(0.6528)
>>> ml_auroc = MultilabelAUROC(num_labels=3, average=None, thresholds=None)
>>> ml_auroc(preds, target)
tensor([0.6250, 0.5000, 0.8333])
>>> ml_auroc = MultilabelAUROC(num_labels=3, average="macro", thresholds=5)
>>> ml_auroc(preds, target)
tensor(0.6528)
>>> ml_auroc = MultilabelAUROC(num_labels=3, average=None, thresholds=5)
>>> ml_auroc(preds, target)
tensor([0.6250, 0.5000, 0.8333])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_labels=num_labels, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multilabel_auroc_arg_validation(num_labels, average, thresholds, ignore_index)
self.average = average
self.validate_args = validate_args
def compute(self) -> Tensor: # type: ignore[override]
"""Compute metric."""
state = (dim_zero_cat(self.preds), dim_zero_cat(self.target)) if self.thresholds is None else self.confmat
return _multilabel_auroc_compute(state, self.num_labels, self.average, self.thresholds, self.ignore_index)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single
>>> import torch
>>> from torchmetrics.classification import MultilabelAUROC
>>> metric = MultilabelAUROC(num_labels=3)
>>> metric.update(torch.rand(20,3), torch.randint(2, (20,3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.classification import MultilabelAUROC
>>> metric = MultilabelAUROC(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(20,3), torch.randint(2, (20,3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class AUROC(_ClassificationTaskWrapper):
r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_).
The AUROC score summarizes the ROC curve into a single number that describes the performance of a model for
multiple thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
corresponds to random guessing.
This class is a simple wrapper that returns the task-specific version of this metric, selected by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryAUROC`, :class:`~torchmetrics.classification.MulticlassAUROC` and
:class:`~torchmetrics.classification.MultilabelAUROC` for the specific details of each argument's influence and
for examples.
Legacy Example:
>>> from torch import tensor
>>> preds = tensor([0.13, 0.26, 0.08, 0.19, 0.34])
>>> target = tensor([0, 0, 1, 1, 1])
>>> auroc = AUROC(task="binary")
>>> auroc(preds, target)
tensor(0.5000)
>>> preds = tensor([[0.90, 0.05, 0.05],
... [0.05, 0.90, 0.05],
... [0.05, 0.05, 0.90],
... [0.85, 0.05, 0.10],
... [0.10, 0.10, 0.80]])
>>> target = tensor([0, 1, 1, 2, 2])
>>> auroc = AUROC(task="multiclass", num_classes=3)
>>> auroc(preds, target)
tensor(0.7778)
"""
def __new__( # type: ignore[misc]
cls: Type["AUROC"],
task: Literal["binary", "multiclass", "multilabel"],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
max_fpr: Optional[float] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
kwargs.update({"thresholds": thresholds, "ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTask.BINARY:
return BinaryAUROC(max_fpr, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
return MulticlassAUROC(num_classes, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)}` was passed.")
return MultilabelAUROC(num_labels, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
def update(self, *args: Any, **kwargs: Any) -> None:
"""Update metric state."""
raise NotImplementedError(
f"{self.__class__.__name__} metric does not have a global `update` method. Use the task specific metric."
)
def compute(self) -> None:
"""Compute metric."""
raise NotImplementedError(
f"{self.__class__.__name__} metric does not have a global `compute` method. Use the task specific metric."
)
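if __name__ == "__main__":
    # Minimal usage sketch of the wrapper above; the tensors are illustrative values only.
    # Because the predictions fall outside [0, 1] they are treated as logits and passed
    # through a sigmoid before the curve underlying the score is built.
    import torch

    preds = torch.tensor([-1.2, 0.3, 2.1, 0.7])
    target = torch.tensor([0, 0, 1, 1])
    metric = AUROC(task="binary", thresholds=None)
    print(type(metric).__name__)  # BinaryAUROC -- the wrapper dispatched to the binary metric
    print(metric(preds, target))  # scalar tensor in [0, 1]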
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/classification/base.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from torchmetrics.metric import Metric
class _ClassificationTaskWrapper(Metric):
"""Base class for wrapper metrics for classification tasks."""
def update(self, *args: Any, **kwargs: Any) -> None:
"""Update metric state."""
raise NotImplementedError(
f"{self.__class__.__name__} metric does not have a global `update` method. Use the task specific metric."
)
def compute(self) -> None:
"""Compute metric."""
raise NotImplementedError(
f"{self.__class__.__name__} metric does not have a global `compute` method. Use the task specific metric."
)
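if __name__ == "__main__":
    # Minimal sketch of the dispatch pattern this base class supports: a task wrapper's
    # ``__new__`` returns one of the task-specific metrics, so the object handed back is
    # not an instance of the wrapper itself. ``AUROC`` is used purely as an illustration.
    from torchmetrics.classification import AUROC, BinaryAUROC

    metric = AUROC(task="binary")
    print(isinstance(metric, BinaryAUROC))  # True -- the wrapper dispatched to the binary metric
    print(isinstance(metric, AUROC))  # False -- the wrapper class itself is never instantiated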
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/classification/jaccard.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.confusion_matrix import (
BinaryConfusionMatrix,
MulticlassConfusionMatrix,
MultilabelConfusionMatrix,
)
from torchmetrics.functional.classification.jaccard import (
_jaccard_index_reduce,
_multiclass_jaccard_index_arg_validation,
_multilabel_jaccard_index_arg_validation,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinaryJaccardIndex.plot", "MulticlassJaccardIndex.plot", "MultilabelJaccardIndex.plot"]
class BinaryJaccardIndex(BinaryConfusionMatrix):
r"""Calculate the Jaccard index for binary tasks.
The `Jaccard index`_ (also known as the intersection over union or Jaccard similarity coefficient) is a statistic
that can be used to determine the similarity and diversity of a sample set. It is defined as the size of the
intersection divided by the union of the sample sets:
.. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A int or float tensor of shape ``(N, ...)``. If preds is a floating point
tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per element.
Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bji`` (:class:`~torch.Tensor`): A tensor containing the Binary Jaccard Index.
Args:
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryJaccardIndex
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> metric = BinaryJaccardIndex()
>>> metric(preds, target)
tensor(0.5000)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryJaccardIndex
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0.35, 0.85, 0.48, 0.01])
>>> metric = BinaryJaccardIndex()
>>> metric(preds, target)
tensor(0.5000)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
threshold=threshold, ignore_index=ignore_index, normalize=None, validate_args=validate_args, **kwargs
)
def compute(self) -> Tensor:
"""Compute metric."""
return _jaccard_index_reduce(self.confmat, average="binary")
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryJaccardIndex
>>> metric = BinaryJaccardIndex()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryJaccardIndex
>>> metric = BinaryJaccardIndex()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MulticlassJaccardIndex(MulticlassConfusionMatrix):
r"""Calculate the Jaccard index for multiclass tasks.
The `Jaccard index`_ (also known as the intersection over union or Jaccard similarity coefficient) is a statistic
that can be used to determine the similarity and diversity of a sample set. It is defined as the size of the
intersection divided by the union of the sample sets:
.. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcji`` (:class:`~torch.Tensor`): A tensor containing the Multi-class Jaccard Index.
Args:
num_classes: Integer specifying the number of classes
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (pred is integer tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassJaccardIndex
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassJaccardIndex(num_classes=3)
>>> metric(preds, target)
tensor(0.6667)
Example (pred is float tensor):
>>> from torchmetrics.classification import MulticlassJaccardIndex
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassJaccardIndex(num_classes=3)
>>> metric(preds, target)
tensor(0.6667)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_classes=num_classes, ignore_index=ignore_index, normalize=None, validate_args=False, **kwargs
)
if validate_args:
_multiclass_jaccard_index_arg_validation(num_classes, ignore_index, average)
self.validate_args = validate_args
self.average = average
def compute(self) -> Tensor:
"""Compute metric."""
return _jaccard_index_reduce(self.confmat, average=self.average, ignore_index=self.ignore_index)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value per class
>>> from torch import randint
>>> from torchmetrics.classification import MulticlassJaccardIndex
>>> metric = MulticlassJaccardIndex(num_classes=3, average=None)
>>> metric.update(randint(3, (20,)), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting a multiple values per class
>>> from torch import randint
>>> from torchmetrics.classification import MulticlassJaccardIndex
>>> metric = MulticlassJaccardIndex(num_classes=3, average=None)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,)), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelJaccardIndex(MultilabelConfusionMatrix):
r"""Calculate the Jaccard index for multilabel tasks.
The `Jaccard index`_ (also known as the intersection over union or Jaccard similarity coefficient) is a statistic
that can be used to determine the similarity and diversity of a sample set. It is defined as the size of the
intersection divided by the union of the sample sets:
.. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A int tensor or float tensor of shape ``(N, C, ...)``. If preds is a
floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlji`` (:class:`~torch.Tensor`): A tensor containing the Multi-label Jaccard Index.
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelJaccardIndex
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelJaccardIndex(num_labels=3)
>>> metric(preds, target)
tensor(0.5000)
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelJaccardIndex
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelJaccardIndex(num_labels=3)
>>> metric(preds, target)
tensor(0.5000)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_labels=num_labels,
threshold=threshold,
ignore_index=ignore_index,
normalize=None,
validate_args=False,
**kwargs,
)
if validate_args:
_multilabel_jaccard_index_arg_validation(num_labels, threshold, ignore_index, average)
self.validate_args = validate_args
self.average = average
def compute(self) -> Tensor:
"""Compute metric."""
return _jaccard_index_reduce(self.confmat, average=self.average)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelJaccardIndex
>>> metric = MultilabelJaccardIndex(num_labels=3)
>>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelJaccardIndex
>>> metric = MultilabelJaccardIndex(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class JaccardIndex(_ClassificationTaskWrapper):
r"""Calculate the Jaccard index.
The `Jaccard index`_ (also known as the intersection over union or Jaccard similarity coefficient) is a statistic
that can be used to determine the similarity and diversity of a sample set. It is defined as the size of the
intersection divided by the union of the sample sets:
.. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
This class is a simple wrapper that returns the task-specific version of this metric, selected by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryJaccardIndex`,
:class:`~torchmetrics.classification.MulticlassJaccardIndex` and
:class:`~torchmetrics.classification.MultilabelJaccardIndex` for the specific details of each argument's influence
and for examples.
Legacy Example:
>>> from torch import randint
>>> target = randint(0, 2, (10, 25, 25))
>>> pred = target.clone()
>>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
>>> jaccard = JaccardIndex(task="multiclass", num_classes=2)
>>> jaccard(pred, target)
tensor(0.9660)
"""
def __new__( # type: ignore[misc]
cls: Type["JaccardIndex"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
kwargs.update({"ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTask.BINARY:
return BinaryJaccardIndex(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
return MulticlassJaccardIndex(num_classes, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)}` was passed.")
return MultilabelJaccardIndex(num_labels, threshold, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
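if __name__ == "__main__":
    # Minimal usage sketch of the wrapper above with an illustrative multilabel setup:
    # three labels, probabilities thresholded at the default 0.5, and the Jaccard index
    # reduced with a macro average over the per-label confusion matrices.
    import torch

    preds = torch.tensor([[0.2, 0.7, 0.6], [0.9, 0.4, 0.1]])
    target = torch.tensor([[0, 1, 1], [1, 0, 1]])
    jaccard = JaccardIndex(task="multilabel", num_labels=3, average="macro")
    print(jaccard(preds, target))  # scalar tensor in [0, 1]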
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/classification/hinge.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.functional.classification.hinge import (
_binary_confusion_matrix_format,
_binary_hinge_loss_arg_validation,
_binary_hinge_loss_tensor_validation,
_binary_hinge_loss_update,
_hinge_loss_compute,
_multiclass_confusion_matrix_format,
_multiclass_hinge_loss_arg_validation,
_multiclass_hinge_loss_tensor_validation,
_multiclass_hinge_loss_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTaskNoMultilabel
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinaryHingeLoss.plot", "MulticlassHingeLoss.plot"]
class BinaryHingeLoss(Metric):
r"""Compute the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs) for binary tasks.
.. math::
\text{Hinge loss} = \max(0, 1 - y \times \hat{y})
Where :math:`y \in \{-1, 1\}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``. Preds should be a tensor containing
probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input
to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). The value
1 always encodes the positive class.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bhl`` (:class:`~torch.Tensor`): A tensor containing the hinge loss.
Args:
squared:
If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.classification import BinaryHingeLoss
>>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
>>> target = torch.tensor([0, 0, 1, 1, 1])
>>> bhl = BinaryHingeLoss()
>>> bhl(preds, target)
tensor(0.6900)
>>> bhl = BinaryHingeLoss(squared=True)
>>> bhl(preds, target)
tensor(0.6905)
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
measures: Tensor
total: Tensor
def __init__(
self,
squared: bool = False,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_binary_hinge_loss_arg_validation(squared, ignore_index)
self.validate_args = validate_args
self.squared = squared
self.ignore_index = ignore_index
self.add_state("measures", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric state."""
if self.validate_args:
_binary_hinge_loss_tensor_validation(preds, target, self.ignore_index)
preds, target = _binary_confusion_matrix_format(
preds, target, threshold=0.0, ignore_index=self.ignore_index, convert_to_labels=False
)
measures, total = _binary_hinge_loss_update(preds, target, self.squared)
self.measures += measures
self.total += total
def compute(self) -> Tensor:
"""Compute metric."""
return _hinge_loss_compute(self.measures, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryHingeLoss
>>> metric = BinaryHingeLoss()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryHingeLoss
>>> metric = BinaryHingeLoss()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MulticlassHingeLoss(Metric):
r"""Compute the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs) for multiclass tasks.
The metric can be computed in two ways. Either, the definition by Crammer and Singer is used:
.. math::
\text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right)
Where :math:`y \in \{0, ..., \mathrm{C}-1\}` is the target class (where :math:`\mathrm{C}` is the number of classes),
and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per class. Alternatively, the metric can
also be computed in one-vs-all approach, where each class is valued against all other classes in a binary fashion.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply softmax per sample.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain values in the [0, n_classes-1] range (except if `ignore_index`
is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mchl`` (:class:`~torch.Tensor`): A tensor containing the multi-class hinge loss.
Args:
num_classes: Integer specifying the number of classes
squared:
If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss.
multiclass_mode:
Determines how the loss is computed: ``"crammer-singer"`` uses the Crammer & Singer formulation above, while
``"one-vs-all"`` computes a binary hinge loss for each class valued against all other classes
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.classification import MulticlassHingeLoss
>>> preds = torch.tensor([[0.25, 0.20, 0.55],
... [0.55, 0.05, 0.40],
... [0.10, 0.30, 0.60],
... [0.90, 0.05, 0.05]])
>>> target = torch.tensor([0, 1, 2, 0])
>>> mchl = MulticlassHingeLoss(num_classes=3)
>>> mchl(preds, target)
tensor(0.9125)
>>> mchl = MulticlassHingeLoss(num_classes=3, squared=True)
>>> mchl(preds, target)
tensor(1.1131)
>>> mchl = MulticlassHingeLoss(num_classes=3, multiclass_mode='one-vs-all')
>>> mchl(preds, target)
tensor([0.8750, 1.1250, 1.1000])
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
measures: Tensor
total: Tensor
def __init__(
self,
num_classes: int,
squared: bool = False,
multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multiclass_hinge_loss_arg_validation(num_classes, squared, multiclass_mode, ignore_index)
self.validate_args = validate_args
self.num_classes = num_classes
self.squared = squared
self.multiclass_mode = multiclass_mode
self.ignore_index = ignore_index
self.add_state(
"measures",
default=torch.tensor(0.0)
if self.multiclass_mode == "crammer-singer"
else torch.zeros(
num_classes,
),
dist_reduce_fx="sum",
)
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric state."""
if self.validate_args:
_multiclass_hinge_loss_tensor_validation(preds, target, self.num_classes, self.ignore_index)
preds, target = _multiclass_confusion_matrix_format(preds, target, self.ignore_index, convert_to_labels=False)
measures, total = _multiclass_hinge_loss_update(preds, target, self.squared, self.multiclass_mode)
self.measures += measures
self.total += total
def compute(self) -> Tensor:
"""Compute metric."""
return _hinge_loss_compute(self.measures, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value per class
>>> from torch import randint, randn
>>> from torchmetrics.classification import MulticlassHingeLoss
>>> metric = MulticlassHingeLoss(num_classes=3)
>>> metric.update(randn(20, 3), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting a multiple values per class
>>> from torch import randint, randn
>>> from torchmetrics.classification import MulticlassHingeLoss
>>> metric = MulticlassHingeLoss(num_classes=3)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randn(20, 3), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class HingeLoss(_ClassificationTaskWrapper):
r"""Compute the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs).
This class is a simple wrapper that returns the task-specific version of this metric, selected by setting the
``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
:class:`~torchmetrics.classification.BinaryHingeLoss` and :class:`~torchmetrics.classification.MulticlassHingeLoss`
for the specific details of each argument's influence and for examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([0, 1, 1])
>>> preds = tensor([0.5, 0.7, 0.1])
>>> hinge = HingeLoss(task="binary")
>>> hinge(preds, target)
tensor(0.9000)
>>> target = tensor([0, 1, 2])
>>> preds = tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
>>> hinge = HingeLoss(task="multiclass", num_classes=3)
>>> hinge(preds, target)
tensor(1.5551)
>>> target = tensor([0, 1, 2])
>>> preds = tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
>>> hinge = HingeLoss(task="multiclass", num_classes=3, multiclass_mode="one-vs-all")
>>> hinge(preds, target)
tensor([1.3743, 1.1945, 1.2359])
"""
def __new__( # type: ignore[misc]
cls: Type["HingeLoss"],
task: Literal["binary", "multiclass"],
num_classes: Optional[int] = None,
squared: bool = False,
multiclass_mode: Optional[Literal["crammer-singer", "one-vs-all"]] = "crammer-singer",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTaskNoMultilabel.from_str(task)
kwargs.update({"ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTaskNoMultilabel.BINARY:
return BinaryHingeLoss(squared, **kwargs)
if task == ClassificationTaskNoMultilabel.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
if multiclass_mode not in ("crammer-singer", "one-vs-all"):
raise ValueError(
f"`multiclass_mode` is expected to be one of 'crammer-singer' or 'one-vs-all' but "
f"`{multiclass_mode}` was passed."
)
return MulticlassHingeLoss(num_classes, squared, multiclass_mode, **kwargs)
raise ValueError(f"Unsupported task `{task}`")
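if __name__ == "__main__":
    # Minimal usage sketch of the wrapper above; the logits are illustrative values only.
    # With the default Crammer-Singer mode a single scalar is returned, while
    # ``multiclass_mode="one-vs-all"`` yields one hinge loss per class.
    import torch

    preds = torch.tensor([[2.0, -0.5, 0.1], [-1.0, 1.5, 0.2], [0.3, 0.2, 1.1]])
    target = torch.tensor([0, 1, 2])
    loss = HingeLoss(task="multiclass", num_classes=3)
    loss_ova = HingeLoss(task="multiclass", num_classes=3, multiclass_mode="one-vs-all")
    print(loss(preds, target).shape, loss_ova(preds, target).shape)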
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/classification/hamming.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
from torchmetrics.functional.classification.hamming import _hamming_distance_reduce
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinaryHammingDistance.plot",
"MulticlassHammingDistance.plot",
"MultilabelHammingDistance.plot",
]
class BinaryHammingDistance(BinaryStatScores):
r"""Compute the average `Hamming distance`_ (also known as Hamming loss) for binary tasks.
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bhd`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``, the metric returns a scalar value.
- If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a
scalar value per sample.
If ``multidim_average`` is set to ``samplewise``, we expect at least one additional dimension ``...`` to be present,
over which the reduction is then applied instead of over the sample dimension ``N``.
Args:
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryHammingDistance
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> metric = BinaryHammingDistance()
>>> metric(preds, target)
tensor(0.3333)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryHammingDistance
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> metric = BinaryHammingDistance()
>>> metric(preds, target)
tensor(0.3333)
Example (multidim tensors):
>>> from torchmetrics.classification import BinaryHammingDistance
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = BinaryHammingDistance(multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.6667, 0.8333])
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _hamming_distance_reduce(tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryHammingDistance
>>> metric = BinaryHammingDistance()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryHammingDistance
>>> metric = BinaryHammingDistance()
            >>> values = []
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MulticlassHammingDistance(MulticlassStatScores):
r"""Compute the average `Hamming distance`_ (also known as Hamming loss) for multiclass tasks.
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
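    Statistics are computed per class: in the first example below one of the two samples with target class ``0`` is
    mispredicted, so the value for class ``0`` is ``0.5`` (visible in the ``average=None`` output), and averaging
    over the three classes gives the reported ``0.1667``.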
As input to ``forward`` and ``update`` the metric accepts the following input:
    - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ...)``.
      If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
      probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mchd`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
    over which the reduction is then applied instead of the sample dimension ``N``.
Args:
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassHammingDistance
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassHammingDistance(num_classes=3)
>>> metric(preds, target)
tensor(0.1667)
>>> mchd = MulticlassHammingDistance(num_classes=3, average=None)
>>> mchd(preds, target)
tensor([0.5000, 0.0000, 0.0000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MulticlassHammingDistance
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassHammingDistance(num_classes=3)
>>> metric(preds, target)
tensor(0.1667)
>>> mchd = MulticlassHammingDistance(num_classes=3, average=None)
>>> mchd(preds, target)
tensor([0.5000, 0.0000, 0.0000])
Example (multidim tensors):
>>> from torchmetrics.classification import MulticlassHammingDistance
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassHammingDistance(num_classes=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.5000, 0.7222])
>>> mchd = MulticlassHammingDistance(num_classes=3, multidim_average='samplewise', average=None)
>>> mchd(preds, target)
tensor([[0.0000, 1.0000, 0.5000],
[1.0000, 0.6667, 0.5000]])
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _hamming_distance_reduce(tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value per class
>>> from torch import randint
>>> from torchmetrics.classification import MulticlassHammingDistance
>>> metric = MulticlassHammingDistance(num_classes=3, average=None)
>>> metric.update(randint(3, (20,)), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
            >>> # Example plotting multiple values per class
>>> from torch import randint
>>> from torchmetrics.classification import MulticlassHammingDistance
>>> metric = MulticlassHammingDistance(num_classes=3, average=None)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,)), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class MultilabelHammingDistance(MultilabelStatScores):
r"""Compute the average `Hamming distance`_ (also known as Hamming loss) for multilabel tasks.
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
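    In other words, the metric is the fraction of label entries on which predictions and targets disagree: in the
    first example below two of the :math:`2 \cdot 3 = 6` entries differ, giving :math:`2 / 6 \approx 0.3333`.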
As input to ``forward`` and ``update`` the metric accepts the following input:
    - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ...)``. If preds is a
      floating point tensor with values outside the [0,1] range, we consider the input to be logits and automatically
      apply sigmoid per element. The predictions are then converted to an int tensor by thresholding with the value
      of ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlhd`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
    over which the reduction is then applied instead of the sample dimension ``N``.
Args:
num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary {0,1} predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelHammingDistance
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelHammingDistance(num_labels=3)
>>> metric(preds, target)
tensor(0.3333)
>>> mlhd = MultilabelHammingDistance(num_labels=3, average=None)
>>> mlhd(preds, target)
tensor([0.0000, 0.5000, 0.5000])
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelHammingDistance
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelHammingDistance(num_labels=3)
>>> metric(preds, target)
tensor(0.3333)
>>> mlhd = MultilabelHammingDistance(num_labels=3, average=None)
>>> mlhd(preds, target)
tensor([0.0000, 0.5000, 0.5000])
Example (multidim tensors):
>>> from torchmetrics.classification import MultilabelHammingDistance
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = MultilabelHammingDistance(num_labels=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0.6667, 0.8333])
>>> mlhd = MultilabelHammingDistance(num_labels=3, multidim_average='samplewise', average=None)
>>> mlhd(preds, target)
tensor([[0.5000, 0.5000, 1.0000],
[1.0000, 1.0000, 0.5000]])
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, tn, fn = self._final_state()
return _hamming_distance_reduce(
tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, multilabel=True
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
            Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelHammingDistance
>>> metric = MultilabelHammingDistance(num_labels=3)
>>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelHammingDistance
>>> metric = MultilabelHammingDistance(num_labels=3)
            >>> values = []
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class HammingDistance(_ClassificationTaskWrapper):
r"""Compute the average `Hamming distance`_ (also known as Hamming loss).
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryHammingDistance`,
:class:`~torchmetrics.classification.MulticlassHammingDistance` and
    :class:`~torchmetrics.classification.MultilabelHammingDistance` for the specific details of each argument's
    influence and examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([[0, 1], [1, 1]])
>>> preds = tensor([[0, 1], [0, 1]])
>>> hamming_distance = HammingDistance(task="multilabel", num_labels=2)
>>> hamming_distance(preds, target)
tensor(0.2500)
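
    The wrapper simply instantiates the task-specific class; for instance, a binary task dispatches to
    :class:`~torchmetrics.classification.BinaryHammingDistance` (values shown are purely illustrative):

        >>> target = tensor([0, 1, 0, 1])
        >>> preds = tensor([0, 1, 1, 0])
        >>> hamming_distance = HammingDistance(task="binary")
        >>> hamming_distance(preds, target)
        tensor(0.5000)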
"""
def __new__( # type: ignore[misc]
cls: Type["HammingDistance"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
if task == ClassificationTask.BINARY:
return BinaryHammingDistance(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return MulticlassHammingDistance(num_classes, top_k, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelHammingDistance(num_labels, threshold, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
| 0 |