koichi12 committed
Commit 95b994f · verified · 1 Parent(s): c7b7007

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +1 -0
  2. .venv/lib/python3.11/site-packages/_pytest/_argcomplete.py +117 -0
  3. .venv/lib/python3.11/site-packages/_pytest/cacheprovider.py +626 -0
  4. .venv/lib/python3.11/site-packages/_pytest/compat.py +351 -0
  5. .venv/lib/python3.11/site-packages/_pytest/doctest.py +755 -0
  6. .venv/lib/python3.11/site-packages/_pytest/helpconfig.py +276 -0
  7. .venv/lib/python3.11/site-packages/_pytest/hookspec.py +1333 -0
  8. .venv/lib/python3.11/site-packages/_pytest/legacypath.py +468 -0
  9. .venv/lib/python3.11/site-packages/_pytest/logging.py +955 -0
  10. .venv/lib/python3.11/site-packages/_pytest/nodes.py +766 -0
  11. .venv/lib/python3.11/site-packages/_pytest/python.py +1679 -0
  12. .venv/lib/python3.11/site-packages/_pytest/python_api.py +1028 -0
  13. .venv/lib/python3.11/site-packages/_pytest/recwarn.py +364 -0
  14. .venv/lib/python3.11/site-packages/_pytest/tmpdir.py +314 -0
  15. .venv/lib/python3.11/site-packages/_pytest/unittest.py +435 -0
  16. .venv/lib/python3.11/site-packages/cpuinfo/__init__.py +5 -0
  17. .venv/lib/python3.11/site-packages/cpuinfo/__pycache__/__init__.cpython-311.pyc +0 -0
  18. .venv/lib/python3.11/site-packages/cpuinfo/__pycache__/__main__.cpython-311.pyc +0 -0
  19. .venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/INSTALLER +1 -0
  20. .venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/LICENSE +201 -0
  21. .venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/METADATA +124 -0
  22. .venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/RECORD +21 -0
  23. .venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/WHEEL +6 -0
  24. .venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/top_level.txt +1 -0
  25. .venv/lib/python3.11/site-packages/ray/cloudpickle/__init__.py +47 -0
  26. .venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/__init__.cpython-311.pyc +0 -0
  27. .venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/cloudpickle.cpython-311.pyc +0 -0
  28. .venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/cloudpickle_fast.cpython-311.pyc +0 -0
  29. .venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/compat.cpython-311.pyc +0 -0
  30. .venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/py_pickle.cpython-311.pyc +0 -0
  31. .venv/lib/python3.11/site-packages/ray/cloudpickle/cloudpickle.py +1487 -0
  32. .venv/lib/python3.11/site-packages/ray/cloudpickle/cloudpickle_fast.py +13 -0
  33. .venv/lib/python3.11/site-packages/ray/cloudpickle/compat.py +19 -0
  34. .venv/lib/python3.11/site-packages/ray/cloudpickle/py_pickle.py +30 -0
  35. .venv/lib/python3.11/site-packages/ray/scripts/__init__.py +0 -0
  36. .venv/lib/python3.11/site-packages/ray/scripts/__pycache__/__init__.cpython-311.pyc +0 -0
  37. .venv/lib/python3.11/site-packages/ray/scripts/scripts.py +2695 -0
  38. .venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/METADATA +441 -0
  39. .venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/RECORD +32 -0
  40. .venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/REQUESTED +0 -0
  41. .venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/WHEEL +5 -0
  42. .venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/licenses/LICENSE.txt +27 -0
  43. .venv/lib/python3.11/site-packages/ray/thirdparty_files/psutil/_psutil_linux.abi3.so +3 -0
  44. .venv/lib/python3.11/site-packages/ray/thirdparty_files/setproctitle-1.2.2.dist-info/METADATA +327 -0
  45. .venv/lib/python3.11/site-packages/ray/thirdparty_files/setproctitle-1.2.2.dist-info/RECORD +7 -0
  46. .venv/lib/python3.11/site-packages/yaml/__init__.py +390 -0
  47. .venv/lib/python3.11/site-packages/yaml/__pycache__/composer.cpython-311.pyc +0 -0
  48. .venv/lib/python3.11/site-packages/yaml/__pycache__/constructor.cpython-311.pyc +0 -0
  49. .venv/lib/python3.11/site-packages/yaml/__pycache__/cyaml.cpython-311.pyc +0 -0
  50. .venv/lib/python3.11/site-packages/yaml/__pycache__/dumper.cpython-311.pyc +0 -0
.gitattributes CHANGED
@@ -180,3 +180,4 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/torch/_inductor/_
 .venv/lib/python3.11/site-packages/ray/rllib/algorithms/__pycache__/algorithm_config.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
 .venv/lib/python3.11/site-packages/ray/rllib/env/__pycache__/multi_agent_episode.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
 .venv/lib/python3.11/site-packages/ray/tune/execution/__pycache__/tune_controller.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/ray/thirdparty_files/psutil/_psutil_linux.abi3.so filter=lfs diff=lfs merge=lfs -text

.venv/lib/python3.11/site-packages/_pytest/_argcomplete.py ADDED
@@ -0,0 +1,117 @@
+"""Allow bash-completion for argparse with argcomplete if installed.
+
+Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn<TAB>"
+instead of the default "dirname ":
+
+    optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh)
+uses a python program to determine startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+    # PYTHON_ARGCOMPLETE_OK
+so the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked with PYTHON_ARGCOMPLETE_OK.
+
+INSTALL/DEBUGGING
+=================
+
+To include this support in another application that has setup.py generated
+scripts:
+
+- Add the line:
+    # PYTHON_ARGCOMPLETE_OK
+  near the top of the main python entry point.
+
+- Include in the file calling parse_args():
+    from _argcomplete import try_argcomplete, filescompleter
+  Call try_argcomplete just before parse_args(), and optionally add
+  filescompleter to the positional arguments' add_argument().
+
+If things do not work right away:
+
+- Switch on argcomplete debugging with (also helpful when doing custom
+  completers):
+    export _ARC_DEBUG=1
+
+- Run:
+    python-argcomplete-check-easy-install-script $(which appname)
+    echo $?
+  will echo 0 if the magic line has been found, 1 if not.
+
+- Sometimes it helps to find early on errors using:
+    _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+  which should throw a KeyError: 'COMPLINE' (which is properly set by the
+  global argcomplete script).
+"""
+
+from __future__ import annotations
+
+import argparse
+from glob import glob
+import os
+import sys
+from typing import Any
+
+
+class FastFilesCompleter:
+    """Fast file completer class."""
+
+    def __init__(self, directories: bool = True) -> None:
+        self.directories = directories
+
+    def __call__(self, prefix: str, **kwargs: Any) -> list[str]:
+        # Only called on non option completions.
+        if os.sep in prefix[1:]:
+            prefix_dir = len(os.path.dirname(prefix) + os.sep)
+        else:
+            prefix_dir = 0
+        completion = []
+        globbed = []
+        if "*" not in prefix and "?" not in prefix:
+            # We are on unix, otherwise no bash.
+            if not prefix or prefix[-1] == os.sep:
+                globbed.extend(glob(prefix + ".*"))
+            prefix += "*"
+        globbed.extend(glob(prefix))
+        for x in sorted(globbed):
+            if os.path.isdir(x):
+                x += "/"
+            # Append stripping the prefix (like bash, not like compgen).
+            completion.append(x[prefix_dir:])
+        return completion
+
+
+if os.environ.get("_ARGCOMPLETE"):
+    try:
+        import argcomplete.completers
+    except ImportError:
+        sys.exit(-1)
+    filescompleter: FastFilesCompleter | None = FastFilesCompleter()
+
+    def try_argcomplete(parser: argparse.ArgumentParser) -> None:
+        argcomplete.autocomplete(parser, always_complete_options=False)
+
+else:
+
+    def try_argcomplete(parser: argparse.ArgumentParser) -> None:
+        pass
+
+    filescompleter = None
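
The module docstring above spells out the intended wiring. A minimal sketch of it in a hypothetical entry-point script (the "myapp" program name and the "file_or_dir" positional are illustrative, not part of this commit; assumes pytest and argcomplete are installed and the bash hook is registered):

# PYTHON_ARGCOMPLETE_OK  (the magic marker argcomplete scans for near the top)
import argparse

from _pytest._argcomplete import filescompleter, try_argcomplete

parser = argparse.ArgumentParser(prog="myapp")
# Attach the fast file completer so "dirn<TAB>" completes to "dirname/"
# instead of the default "dirname ".
parser.add_argument("file_or_dir", nargs="*").completer = filescompleter
try_argcomplete(parser)  # no-op when the _ARGCOMPLETE env. var is not set
args = parser.parse_args()
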
.venv/lib/python3.11/site-packages/_pytest/cacheprovider.py ADDED
@@ -0,0 +1,626 @@
+# mypy: allow-untyped-defs
+"""Implementation of the cache provider."""
+
+# This plugin was not named "cache" to avoid conflicts with the external
+# pytest-cache version.
+from __future__ import annotations
+
+import dataclasses
+import errno
+import json
+import os
+from pathlib import Path
+import tempfile
+from typing import final
+from typing import Generator
+from typing import Iterable
+
+from .pathlib import resolve_from_str
+from .pathlib import rm_rf
+from .reports import CollectReport
+from _pytest import nodes
+from _pytest._io import TerminalWriter
+from _pytest.config import Config
+from _pytest.config import ExitCode
+from _pytest.config import hookimpl
+from _pytest.config.argparsing import Parser
+from _pytest.deprecated import check_ispytest
+from _pytest.fixtures import fixture
+from _pytest.fixtures import FixtureRequest
+from _pytest.main import Session
+from _pytest.nodes import Directory
+from _pytest.nodes import File
+from _pytest.reports import TestReport
+
+
+README_CONTENT = """\
+# pytest cache directory #
+
+This directory contains data from the pytest's cache plugin,
+which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
+
+**Do not** commit this to version control.
+
+See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information.
+"""
+
+CACHEDIR_TAG_CONTENT = b"""\
+Signature: 8a477f597d28d172789f06886806bc55
+# This file is a cache directory tag created by pytest.
+# For information about cache directory tags, see:
+# https://bford.info/cachedir/spec.html
+"""
+
+
+@final
+@dataclasses.dataclass
+class Cache:
+    """Instance of the `cache` fixture."""
+
+    _cachedir: Path = dataclasses.field(repr=False)
+    _config: Config = dataclasses.field(repr=False)
+
+    # Sub-directory under cache-dir for directories created by `mkdir()`.
+    _CACHE_PREFIX_DIRS = "d"
+
+    # Sub-directory under cache-dir for values created by `set()`.
+    _CACHE_PREFIX_VALUES = "v"
+
+    def __init__(
+        self, cachedir: Path, config: Config, *, _ispytest: bool = False
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._cachedir = cachedir
+        self._config = config
+
+    @classmethod
+    def for_config(cls, config: Config, *, _ispytest: bool = False) -> Cache:
+        """Create the Cache instance for a Config.
+
+        :meta private:
+        """
+        check_ispytest(_ispytest)
+        cachedir = cls.cache_dir_from_config(config, _ispytest=True)
+        if config.getoption("cacheclear") and cachedir.is_dir():
+            cls.clear_cache(cachedir, _ispytest=True)
+        return cls(cachedir, config, _ispytest=True)
+
+    @classmethod
+    def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None:
+        """Clear the sub-directories used to hold cached directories and values.
+
+        :meta private:
+        """
+        check_ispytest(_ispytest)
+        for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES):
+            d = cachedir / prefix
+            if d.is_dir():
+                rm_rf(d)
+
+    @staticmethod
+    def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path:
+        """Get the path to the cache directory for a Config.
+
+        :meta private:
+        """
+        check_ispytest(_ispytest)
+        return resolve_from_str(config.getini("cache_dir"), config.rootpath)
+
+    def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None:
+        """Issue a cache warning.
+
+        :meta private:
+        """
+        check_ispytest(_ispytest)
+        import warnings
+
+        from _pytest.warning_types import PytestCacheWarning
+
+        warnings.warn(
+            PytestCacheWarning(fmt.format(**args) if args else fmt),
+            self._config.hook,
+            stacklevel=3,
+        )
+
+    def _mkdir(self, path: Path) -> None:
+        self._ensure_cache_dir_and_supporting_files()
+        path.mkdir(exist_ok=True, parents=True)
+
+    def mkdir(self, name: str) -> Path:
+        """Return a directory path object with the given name.
+
+        If the directory does not yet exist, it will be created. You can use
+        it to manage files to e.g. store/retrieve database dumps across test
+        sessions.
+
+        .. versionadded:: 7.0
+
+        :param name:
+            Must be a string not containing a ``/`` separator.
+            Make sure the name contains your plugin or application
+            identifiers to prevent clashes with other cache users.
+        """
+        path = Path(name)
+        if len(path.parts) > 1:
+            raise ValueError("name is not allowed to contain path separators")
+        res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path)
+        self._mkdir(res)
+        return res
+
+    def _getvaluepath(self, key: str) -> Path:
+        return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key))
+
+    def get(self, key: str, default):
+        """Return the cached value for the given key.
+
+        If no value was yet cached or the value cannot be read, the specified
+        default is returned.
+
+        :param key:
+            Must be a ``/`` separated value. Usually the first
+            name is the name of your plugin or your application.
+        :param default:
+            The value to return in case of a cache-miss or invalid cache value.
+        """
+        path = self._getvaluepath(key)
+        try:
+            with path.open("r", encoding="UTF-8") as f:
+                return json.load(f)
+        except (ValueError, OSError):
+            return default
+
+    def set(self, key: str, value: object) -> None:
+        """Save value for the given key.
+
+        :param key:
+            Must be a ``/`` separated value. Usually the first
+            name is the name of your plugin or your application.
+        :param value:
+            Must be of any combination of basic python types,
+            including nested types like lists of dictionaries.
+        """
+        path = self._getvaluepath(key)
+        try:
+            self._mkdir(path.parent)
+        except OSError as exc:
+            self.warn(
+                f"could not create cache path {path}: {exc}",
+                _ispytest=True,
+            )
+            return
+        data = json.dumps(value, ensure_ascii=False, indent=2)
+        try:
+            f = path.open("w", encoding="UTF-8")
+        except OSError as exc:
+            self.warn(
+                f"cache could not write path {path}: {exc}",
+                _ispytest=True,
+            )
+        else:
+            with f:
+                f.write(data)
+
+    def _ensure_cache_dir_and_supporting_files(self) -> None:
+        """Create the cache dir and its supporting files."""
+        if self._cachedir.is_dir():
+            return
+
+        self._cachedir.parent.mkdir(parents=True, exist_ok=True)
+        with tempfile.TemporaryDirectory(
+            prefix="pytest-cache-files-",
+            dir=self._cachedir.parent,
+        ) as newpath:
+            path = Path(newpath)
+
+            # Reset permissions to the default, see #12308.
+            # Note: there's no way to get the current umask atomically, eek.
+            umask = os.umask(0o022)
+            os.umask(umask)
+            path.chmod(0o777 - umask)
+
+            with open(path.joinpath("README.md"), "x", encoding="UTF-8") as f:
+                f.write(README_CONTENT)
+            with open(path.joinpath(".gitignore"), "x", encoding="UTF-8") as f:
+                f.write("# Created by pytest automatically.\n*\n")
+            with open(path.joinpath("CACHEDIR.TAG"), "xb") as f:
+                f.write(CACHEDIR_TAG_CONTENT)
+
+            try:
+                path.rename(self._cachedir)
+            except OSError as e:
+                # If 2 concurrent pytests both race to the rename, the loser
+                # gets "Directory not empty" from the rename. In this case,
+                # everything is handled so just continue (while letting the
+                # temporary directory be cleaned up).
+                # On Windows, the error is a FileExistsError which translates to EEXIST.
+                if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
+                    raise
+            else:
+                # Create a directory in place of the one we just moved so that
+                # `TemporaryDirectory`'s cleanup doesn't complain.
+                #
+                # TODO: pass ignore_cleanup_errors=True when we no longer support python < 3.10.
+                # See https://github.com/python/cpython/issues/74168. Note that passing
+                # delete=False would do the wrong thing in case of errors and isn't supported
+                # until python 3.12.
+                path.mkdir()
+
+
+class LFPluginCollWrapper:
+    def __init__(self, lfplugin: LFPlugin) -> None:
+        self.lfplugin = lfplugin
+        self._collected_at_least_one_failure = False
+
+    @hookimpl(wrapper=True)
+    def pytest_make_collect_report(
+        self, collector: nodes.Collector
+    ) -> Generator[None, CollectReport, CollectReport]:
+        res = yield
+        if isinstance(collector, (Session, Directory)):
+            # Sort any lf-paths to the beginning.
+            lf_paths = self.lfplugin._last_failed_paths
+
+            # Use stable sort to prioritize last failed.
+            def sort_key(node: nodes.Item | nodes.Collector) -> bool:
+                return node.path in lf_paths
+
+            res.result = sorted(
+                res.result,
+                key=sort_key,
+                reverse=True,
+            )
+
+        elif isinstance(collector, File):
+            if collector.path in self.lfplugin._last_failed_paths:
+                result = res.result
+                lastfailed = self.lfplugin.lastfailed
+
+                # Only filter with known failures.
+                if not self._collected_at_least_one_failure:
+                    if not any(x.nodeid in lastfailed for x in result):
+                        return res
+                    self.lfplugin.config.pluginmanager.register(
+                        LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip"
+                    )
+                    self._collected_at_least_one_failure = True
+
+                session = collector.session
+                result[:] = [
+                    x
+                    for x in result
+                    if x.nodeid in lastfailed
+                    # Include any passed arguments (not trivial to filter).
+                    or session.isinitpath(x.path)
+                    # Keep all sub-collectors.
+                    or isinstance(x, nodes.Collector)
+                ]
+
+        return res
+
+
+class LFPluginCollSkipfiles:
+    def __init__(self, lfplugin: LFPlugin) -> None:
+        self.lfplugin = lfplugin
+
+    @hookimpl
+    def pytest_make_collect_report(
+        self, collector: nodes.Collector
+    ) -> CollectReport | None:
+        if isinstance(collector, File):
+            if collector.path not in self.lfplugin._last_failed_paths:
+                self.lfplugin._skipped_files += 1
+
+                return CollectReport(
+                    collector.nodeid, "passed", longrepr=None, result=[]
+                )
+        return None
+
+
+class LFPlugin:
+    """Plugin which implements the --lf (run last-failing) option."""
+
+    def __init__(self, config: Config) -> None:
+        self.config = config
+        active_keys = "lf", "failedfirst"
+        self.active = any(config.getoption(key) for key in active_keys)
+        assert config.cache
+        self.lastfailed: dict[str, bool] = config.cache.get("cache/lastfailed", {})
+        self._previously_failed_count: int | None = None
+        self._report_status: str | None = None
+        self._skipped_files = 0  # count skipped files during collection due to --lf
+
+        if config.getoption("lf"):
+            self._last_failed_paths = self.get_last_failed_paths()
+            config.pluginmanager.register(
+                LFPluginCollWrapper(self), "lfplugin-collwrapper"
+            )
+
+    def get_last_failed_paths(self) -> set[Path]:
+        """Return a set with all Paths of the previously failed nodeids and
+        their parents."""
+        rootpath = self.config.rootpath
+        result = set()
+        for nodeid in self.lastfailed:
+            path = rootpath / nodeid.split("::")[0]
+            result.add(path)
+            result.update(path.parents)
+        return {x for x in result if x.exists()}
+
+    def pytest_report_collectionfinish(self) -> str | None:
+        if self.active and self.config.get_verbosity() >= 0:
+            return f"run-last-failure: {self._report_status}"
+        return None
+
+    def pytest_runtest_logreport(self, report: TestReport) -> None:
+        if (report.when == "call" and report.passed) or report.skipped:
+            self.lastfailed.pop(report.nodeid, None)
+        elif report.failed:
+            self.lastfailed[report.nodeid] = True
+
+    def pytest_collectreport(self, report: CollectReport) -> None:
+        passed = report.outcome in ("passed", "skipped")
+        if passed:
+            if report.nodeid in self.lastfailed:
+                self.lastfailed.pop(report.nodeid)
+                self.lastfailed.update((item.nodeid, True) for item in report.result)
+        else:
+            self.lastfailed[report.nodeid] = True
+
+    @hookimpl(wrapper=True, tryfirst=True)
+    def pytest_collection_modifyitems(
+        self, config: Config, items: list[nodes.Item]
+    ) -> Generator[None]:
+        res = yield
+
+        if not self.active:
+            return res
+
+        if self.lastfailed:
+            previously_failed = []
+            previously_passed = []
+            for item in items:
+                if item.nodeid in self.lastfailed:
+                    previously_failed.append(item)
+                else:
+                    previously_passed.append(item)
+            self._previously_failed_count = len(previously_failed)
+
+            if not previously_failed:
+                # Running a subset of all tests with recorded failures
+                # only outside of it.
+                self._report_status = "%d known failures not in selected tests" % (
+                    len(self.lastfailed),
+                )
+            else:
+                if self.config.getoption("lf"):
+                    items[:] = previously_failed
+                    config.hook.pytest_deselected(items=previously_passed)
+                else:  # --failedfirst
+                    items[:] = previously_failed + previously_passed
+
+                noun = "failure" if self._previously_failed_count == 1 else "failures"
+                suffix = " first" if self.config.getoption("failedfirst") else ""
+                self._report_status = (
+                    f"rerun previous {self._previously_failed_count} {noun}{suffix}"
+                )
+
+            if self._skipped_files > 0:
+                files_noun = "file" if self._skipped_files == 1 else "files"
+                self._report_status += f" (skipped {self._skipped_files} {files_noun})"
+        else:
+            self._report_status = "no previously failed tests, "
+            if self.config.getoption("last_failed_no_failures") == "none":
+                self._report_status += "deselecting all items."
+                config.hook.pytest_deselected(items=items[:])
+                items[:] = []
+            else:
+                self._report_status += "not deselecting items."
+
+        return res
+
+    def pytest_sessionfinish(self, session: Session) -> None:
+        config = self.config
+        if config.getoption("cacheshow") or hasattr(config, "workerinput"):
+            return
+
+        assert config.cache is not None
+        saved_lastfailed = config.cache.get("cache/lastfailed", {})
+        if saved_lastfailed != self.lastfailed:
+            config.cache.set("cache/lastfailed", self.lastfailed)
+
+
+class NFPlugin:
+    """Plugin which implements the --nf (run new-first) option."""
+
+    def __init__(self, config: Config) -> None:
+        self.config = config
+        self.active = config.option.newfirst
+        assert config.cache is not None
+        self.cached_nodeids = set(config.cache.get("cache/nodeids", []))
+
+    @hookimpl(wrapper=True, tryfirst=True)
+    def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> Generator[None]:
+        res = yield
+
+        if self.active:
+            new_items: dict[str, nodes.Item] = {}
+            other_items: dict[str, nodes.Item] = {}
+            for item in items:
+                if item.nodeid not in self.cached_nodeids:
+                    new_items[item.nodeid] = item
+                else:
+                    other_items[item.nodeid] = item
+
+            items[:] = self._get_increasing_order(
+                new_items.values()
+            ) + self._get_increasing_order(other_items.values())
+            self.cached_nodeids.update(new_items)
+        else:
+            self.cached_nodeids.update(item.nodeid for item in items)
+
+        return res
+
+    def _get_increasing_order(self, items: Iterable[nodes.Item]) -> list[nodes.Item]:
+        return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True)
+
+    def pytest_sessionfinish(self) -> None:
+        config = self.config
+        if config.getoption("cacheshow") or hasattr(config, "workerinput"):
+            return
+
+        if config.getoption("collectonly"):
+            return
+
+        assert config.cache is not None
+        config.cache.set("cache/nodeids", sorted(self.cached_nodeids))
+
+
+def pytest_addoption(parser: Parser) -> None:
+    group = parser.getgroup("general")
+    group.addoption(
+        "--lf",
+        "--last-failed",
+        action="store_true",
+        dest="lf",
+        help="Rerun only the tests that failed "
+        "at the last run (or all if none failed)",
+    )
+    group.addoption(
+        "--ff",
+        "--failed-first",
+        action="store_true",
+        dest="failedfirst",
+        help="Run all tests, but run the last failures first. "
+        "This may re-order tests and thus lead to "
+        "repeated fixture setup/teardown.",
+    )
+    group.addoption(
+        "--nf",
+        "--new-first",
+        action="store_true",
+        dest="newfirst",
+        help="Run tests from new files first, then the rest of the tests "
+        "sorted by file mtime",
+    )
+    group.addoption(
+        "--cache-show",
+        action="append",
+        nargs="?",
+        dest="cacheshow",
+        help=(
+            "Show cache contents, don't perform collection or tests. "
+            "Optional argument: glob (default: '*')."
+        ),
+    )
+    group.addoption(
+        "--cache-clear",
+        action="store_true",
+        dest="cacheclear",
+        help="Remove all cache contents at start of test run",
+    )
+    cache_dir_default = ".pytest_cache"
+    if "TOX_ENV_DIR" in os.environ:
+        cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
+    parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path")
+    group.addoption(
+        "--lfnf",
+        "--last-failed-no-failures",
+        action="store",
+        dest="last_failed_no_failures",
+        choices=("all", "none"),
+        default="all",
+        help="With ``--lf``, determines whether to execute tests when there "
+        "are no previously (known) failures or when no "
+        "cached ``lastfailed`` data was found. "
+        "``all`` (the default) runs the full test suite again. "
+        "``none`` just emits a message about no known failures and exits successfully.",
+    )
+
+
+def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
+    if config.option.cacheshow and not config.option.help:
+        from _pytest.main import wrap_session
+
+        return wrap_session(config, cacheshow)
+    return None
+
+
+@hookimpl(tryfirst=True)
+def pytest_configure(config: Config) -> None:
+    config.cache = Cache.for_config(config, _ispytest=True)
+    config.pluginmanager.register(LFPlugin(config), "lfplugin")
+    config.pluginmanager.register(NFPlugin(config), "nfplugin")
+
+
+@fixture
+def cache(request: FixtureRequest) -> Cache:
+    """Return a cache object that can persist state between testing sessions.
+
+    cache.get(key, default)
+    cache.set(key, value)
+
+    Keys must be ``/`` separated strings, where the first part is usually the
+    name of your plugin or application to avoid clashes with other cache users.
+
+    Values can be any object handled by the json stdlib module.
+    """
+    assert request.config.cache is not None
+    return request.config.cache
+
+
+def pytest_report_header(config: Config) -> str | None:
+    """Display cachedir with --cache-show and if non-default."""
+    if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache":
+        assert config.cache is not None
+        cachedir = config.cache._cachedir
+        # TODO: evaluate generating upward relative paths
+        # starting with .., ../.. if sensible
+
+        try:
+            displaypath = cachedir.relative_to(config.rootpath)
+        except ValueError:
+            displaypath = cachedir
+        return f"cachedir: {displaypath}"
+    return None
+
+
+def cacheshow(config: Config, session: Session) -> int:
+    from pprint import pformat
+
+    assert config.cache is not None
+
+    tw = TerminalWriter()
+    tw.line("cachedir: " + str(config.cache._cachedir))
+    if not config.cache._cachedir.is_dir():
+        tw.line("cache is empty")
+        return 0
+
+    glob = config.option.cacheshow[0]
+    if glob is None:
+        glob = "*"
+
+    dummy = object()
+    basedir = config.cache._cachedir
+    vdir = basedir / Cache._CACHE_PREFIX_VALUES
+    tw.sep("-", f"cache values for {glob!r}")
+    for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()):
+        key = str(valpath.relative_to(vdir))
+        val = config.cache.get(key, dummy)
+        if val is dummy:
+            tw.line(f"{key} contains unreadable content, will be ignored")
+        else:
+            tw.line(f"{key} contains:")
+            for line in pformat(val).splitlines():
+                tw.line(" " + line)
+
+    ddir = basedir / Cache._CACHE_PREFIX_DIRS
+    if ddir.is_dir():
+        contents = sorted(ddir.rglob(glob))
+        tw.sep("-", f"cache directories for {glob!r}")
+        for p in contents:
+            # if p.is_dir():
+            #     print("%s/" % p.relative_to(basedir))
+            if p.is_file():
+                key = str(p.relative_to(basedir))
+                tw.line(f"{key} is a file of length {p.stat().st_size:d}")
+    return 0
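
As a reading aid for the cache fixture and Cache.get/Cache.set defined above, a minimal hypothetical test showing the round trip their docstrings describe (the "myplugin/expensive" key and the stand-in computation are illustrative only; values are JSON-serialized under .pytest_cache/v/ and survive between sessions):

def test_expensive_resource(cache):
    value = cache.get("myplugin/expensive", None)  # default returned on a miss
    if value is None:
        value = {"answer": 42}  # stand-in for an expensive computation
        cache.set("myplugin/expensive", value)  # persisted for the next session
    assert value["answer"] == 42
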
.venv/lib/python3.11/site-packages/_pytest/compat.py ADDED
@@ -0,0 +1,351 @@
+# mypy: allow-untyped-defs
+"""Python version compatibility code."""
+
+from __future__ import annotations
+
+import dataclasses
+import enum
+import functools
+import inspect
+from inspect import Parameter
+from inspect import signature
+import os
+from pathlib import Path
+import sys
+from typing import Any
+from typing import Callable
+from typing import Final
+from typing import NoReturn
+
+import py
+
+
+#: constant to prepare valuing pylib path replacements/lazy proxies later on
+# intended for removal in pytest 8.0 or 9.0
+
+# fmt: off
+# intentional space to create a fake difference for the verification
+LEGACY_PATH = py.path. local
+# fmt: on
+
+
+def legacy_path(path: str | os.PathLike[str]) -> LEGACY_PATH:
+    """Internal wrapper to prepare lazy proxies for legacy_path instances"""
+    return LEGACY_PATH(path)
+
+
+# fmt: off
+# Singleton type for NOTSET, as described in:
+# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
+class NotSetType(enum.Enum):
+    token = 0
+NOTSET: Final = NotSetType.token
+# fmt: on
+
+
+def is_generator(func: object) -> bool:
+    genfunc = inspect.isgeneratorfunction(func)
+    return genfunc and not iscoroutinefunction(func)
+
+
+def iscoroutinefunction(func: object) -> bool:
+    """Return True if func is a coroutine function (a function defined with async
+    def syntax, and doesn't contain yield), or a function decorated with
+    @asyncio.coroutine.
+
+    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
+    importing asyncio directly, which in turns also initializes the "logging"
+    module as a side-effect (see issue #8).
+    """
+    return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
+
+
+def is_async_function(func: object) -> bool:
+    """Return True if the given function seems to be an async function or
+    an async generator."""
+    return iscoroutinefunction(func) or inspect.isasyncgenfunction(func)
+
+
+def getlocation(function, curdir: str | os.PathLike[str] | None = None) -> str:
+    function = get_real_func(function)
+    fn = Path(inspect.getfile(function))
+    lineno = function.__code__.co_firstlineno
+    if curdir is not None:
+        try:
+            relfn = fn.relative_to(curdir)
+        except ValueError:
+            pass
+        else:
+            return "%s:%d" % (relfn, lineno + 1)
+    return "%s:%d" % (fn, lineno + 1)
+
+
+def num_mock_patch_args(function) -> int:
+    """Return number of arguments used up by mock arguments (if any)."""
+    patchings = getattr(function, "patchings", None)
+    if not patchings:
+        return 0
+
+    mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
+    ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
+
+    return len(
+        [
+            p
+            for p in patchings
+            if not p.attribute_name
+            and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
+        ]
+    )
+
+
+def getfuncargnames(
+    function: Callable[..., object],
+    *,
+    name: str = "",
+    cls: type | None = None,
+) -> tuple[str, ...]:
+    """Return the names of a function's mandatory arguments.
+
+    Should return the names of all function arguments that:
+    * Aren't bound to an instance or type as in instance or class methods.
+    * Don't have default values.
+    * Aren't bound with functools.partial.
+    * Aren't replaced with mocks.
+
+    The cls arguments indicate that the function should be treated as a bound
+    method even though it's not unless the function is a static method.
+
+    The name parameter should be the original name in which the function was collected.
+    """
+    # TODO(RonnyPfannschmidt): This function should be refactored when we
+    # revisit fixtures. The fixture mechanism should ask the node for
+    # the fixture names, and not try to obtain directly from the
+    # function object well after collection has occurred.
+
+    # The parameters attribute of a Signature object contains an
+    # ordered mapping of parameter names to Parameter instances. This
+    # creates a tuple of the names of the parameters that don't have
+    # defaults.
+    try:
+        parameters = signature(function).parameters
+    except (ValueError, TypeError) as e:
+        from _pytest.outcomes import fail
+
+        fail(
+            f"Could not determine arguments of {function!r}: {e}",
+            pytrace=False,
+        )
+
+    arg_names = tuple(
+        p.name
+        for p in parameters.values()
+        if (
+            p.kind is Parameter.POSITIONAL_OR_KEYWORD
+            or p.kind is Parameter.KEYWORD_ONLY
+        )
+        and p.default is Parameter.empty
+    )
+    if not name:
+        name = function.__name__
+
+    # If this function should be treated as a bound method even though
+    # it's passed as an unbound method or function, remove the first
+    # parameter name.
+    if (
+        # Not using `getattr` because we don't want to resolve the staticmethod.
+        # Not using `cls.__dict__` because we want to check the entire MRO.
+        cls
+        and not isinstance(
+            inspect.getattr_static(cls, name, default=None), staticmethod
+        )
+    ):
+        arg_names = arg_names[1:]
+    # Remove any names that will be replaced with mocks.
+    if hasattr(function, "__wrapped__"):
+        arg_names = arg_names[num_mock_patch_args(function) :]
+    return arg_names
+
+
+def get_default_arg_names(function: Callable[..., Any]) -> tuple[str, ...]:
+    # Note: this code intentionally mirrors the code at the beginning of
+    # getfuncargnames, to get the arguments which were excluded from its result
+    # because they had default values.
+    return tuple(
+        p.name
+        for p in signature(function).parameters.values()
+        if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
+        and p.default is not Parameter.empty
+    )
+
+
+_non_printable_ascii_translate_table = {
+    i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127)
+}
+_non_printable_ascii_translate_table.update(
+    {ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
+)
+
+
+def ascii_escaped(val: bytes | str) -> str:
+    r"""If val is pure ASCII, return it as an str, otherwise, escape
+    bytes objects into a sequence of escaped bytes:
+
+    b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6'
+
+    and escapes strings into a sequence of escaped unicode ids, e.g.:
+
+    r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
+
+    Note:
+        The obvious "v.decode('unicode-escape')" will return
+        valid UTF-8 unicode if it finds them in bytes, but we
+        want to return escaped bytes for any byte, even if they match
+        a UTF-8 string.
+    """
+    if isinstance(val, bytes):
+        ret = val.decode("ascii", "backslashreplace")
+    else:
+        ret = val.encode("unicode_escape").decode("ascii")
+    return ret.translate(_non_printable_ascii_translate_table)
+
+
+@dataclasses.dataclass
+class _PytestWrapper:
+    """Dummy wrapper around a function object for internal use only.
+
+    Used to correctly unwrap the underlying function object when we are
+    creating fixtures, because we wrap the function object ourselves with a
+    decorator to issue warnings when the fixture function is called directly.
+    """
+
+    obj: Any
+
+
+def get_real_func(obj):
+    """Get the real function object of the (possibly) wrapped object by
+    functools.wraps or functools.partial."""
+    start_obj = obj
+    for i in range(100):
+        # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
+        # to trigger a warning if it gets called directly instead of by pytest: we don't
+        # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
+        new_obj = getattr(obj, "__pytest_wrapped__", None)
+        if isinstance(new_obj, _PytestWrapper):
+            obj = new_obj.obj
+            break
+        new_obj = getattr(obj, "__wrapped__", None)
+        if new_obj is None:
+            break
+        obj = new_obj
+    else:
+        from _pytest._io.saferepr import saferepr
+
+        raise ValueError(
+            f"could not find real function of {saferepr(start_obj)}\nstopped at {saferepr(obj)}"
+        )
+    if isinstance(obj, functools.partial):
+        obj = obj.func
+    return obj
+
+
+def get_real_method(obj, holder):
+    """Attempt to obtain the real function object that might be wrapping
+    ``obj``, while at the same time returning a bound method to ``holder`` if
+    the original object was a bound method."""
+    try:
+        is_method = hasattr(obj, "__func__")
+        obj = get_real_func(obj)
+    except Exception:  # pragma: no cover
+        return obj
+    if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
+        obj = obj.__get__(holder)
+    return obj
+
+
+def getimfunc(func):
+    try:
+        return func.__func__
+    except AttributeError:
+        return func
+
+
+def safe_getattr(object: Any, name: str, default: Any) -> Any:
+    """Like getattr but return default upon any Exception or any OutcomeException.
+
+    Attribute access can potentially fail for 'evil' Python objects.
+    See issue #214.
+    It catches OutcomeException because of #2490 (issue #580), new outcomes
+    are derived from BaseException instead of Exception (for more details
+    check #2707).
+    """
+    from _pytest.outcomes import TEST_OUTCOME
+
+    try:
+        return getattr(object, name, default)
+    except TEST_OUTCOME:
+        return default
+
+
+def safe_isclass(obj: object) -> bool:
+    """Ignore any exception via isinstance on Python 3."""
+    try:
+        return inspect.isclass(obj)
+    except Exception:
+        return False
+
+
+def get_user_id() -> int | None:
+    """Return the current process's real user id or None if it could not be
+    determined.
+
+    :return: The user id or None if it could not be determined.
+    """
+    # mypy follows the version and platform checking expectation of PEP 484:
+    # https://mypy.readthedocs.io/en/stable/common_issues.html?highlight=platform#python-version-and-system-platform-checks
+    # Containment checks are too complex for mypy v1.5.0 and cause failure.
+    if sys.platform == "win32" or sys.platform == "emscripten":
+        # win32 does not have a getuid() function.
+        # Emscripten has a return 0 stub.
+        return None
+    else:
+        # On other platforms, a return value of -1 is assumed to indicate that
+        # the current process's real user id could not be determined.
+        ERROR = -1
+        uid = os.getuid()
+        return uid if uid != ERROR else None
+
+
+# Perform exhaustiveness checking.
+#
+# Consider this example:
+#
+#     MyUnion = Union[int, str]
+#
+#     def handle(x: MyUnion) -> int {
+#         if isinstance(x, int):
+#             return 1
+#         elif isinstance(x, str):
+#             return 2
+#         else:
+#             raise Exception('unreachable')
+#
+# Now suppose we add a new variant:
+#
+#     MyUnion = Union[int, str, bytes]
+#
+# After doing this, we must remember ourselves to go and update the handle
+# function to handle the new variant.
+#
+# With `assert_never` we can do better:
+#
+#     // raise Exception('unreachable')
+#     return assert_never(x)
+#
+# Now, if we forget to handle the new variant, the type-checker will emit a
+# compile-time error, instead of the runtime error we would have gotten
+# previously.
+#
+# This also work for Enums (if you use `is` to compare) and Literals.
+def assert_never(value: NoReturn) -> NoReturn:
+    assert False, f"Unhandled value: {value} ({type(value).__name__})"
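
The long comment above explains the exhaustiveness-checking pattern behind assert_never. A self-contained sketch of that pattern (the MyUnion/handle names come from the comment itself and are illustrative; the payoff shows up when a type checker such as mypy is run over the code):

from typing import Union

from _pytest.compat import assert_never

MyUnion = Union[int, str]

def handle(x: MyUnion) -> int:
    if isinstance(x, int):
        return 1
    elif isinstance(x, str):
        return 2
    else:
        # The type checker narrows x to NoReturn here; adding a variant to
        # MyUnion without handling it becomes a type error, not a runtime bug.
        return assert_never(x)
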
.venv/lib/python3.11/site-packages/_pytest/doctest.py ADDED
@@ -0,0 +1,755 @@
1
+ # mypy: allow-untyped-defs
2
+ """Discover and run doctests in modules and test files."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import bdb
7
+ from contextlib import contextmanager
8
+ import functools
9
+ import inspect
10
+ import os
11
+ from pathlib import Path
12
+ import platform
13
+ import sys
14
+ import traceback
15
+ import types
16
+ from typing import Any
17
+ from typing import Callable
18
+ from typing import Generator
19
+ from typing import Iterable
20
+ from typing import Pattern
21
+ from typing import Sequence
22
+ from typing import TYPE_CHECKING
23
+ import warnings
24
+
25
+ from _pytest import outcomes
26
+ from _pytest._code.code import ExceptionInfo
27
+ from _pytest._code.code import ReprFileLocation
28
+ from _pytest._code.code import TerminalRepr
29
+ from _pytest._io import TerminalWriter
30
+ from _pytest.compat import safe_getattr
31
+ from _pytest.config import Config
32
+ from _pytest.config.argparsing import Parser
33
+ from _pytest.fixtures import fixture
34
+ from _pytest.fixtures import TopRequest
35
+ from _pytest.nodes import Collector
36
+ from _pytest.nodes import Item
37
+ from _pytest.outcomes import OutcomeException
38
+ from _pytest.outcomes import skip
39
+ from _pytest.pathlib import fnmatch_ex
40
+ from _pytest.python import Module
41
+ from _pytest.python_api import approx
42
+ from _pytest.warning_types import PytestWarning
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ import doctest
47
+
48
+ from typing_extensions import Self
49
+
50
+ DOCTEST_REPORT_CHOICE_NONE = "none"
51
+ DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
52
+ DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
53
+ DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
54
+ DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
55
+
56
+ DOCTEST_REPORT_CHOICES = (
57
+ DOCTEST_REPORT_CHOICE_NONE,
58
+ DOCTEST_REPORT_CHOICE_CDIFF,
59
+ DOCTEST_REPORT_CHOICE_NDIFF,
60
+ DOCTEST_REPORT_CHOICE_UDIFF,
61
+ DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
62
+ )
63
+
64
+ # Lazy definition of runner class
65
+ RUNNER_CLASS = None
66
+ # Lazy definition of output checker class
67
+ CHECKER_CLASS: type[doctest.OutputChecker] | None = None
68
+
69
+
70
+ def pytest_addoption(parser: Parser) -> None:
71
+ parser.addini(
72
+ "doctest_optionflags",
73
+ "Option flags for doctests",
74
+ type="args",
75
+ default=["ELLIPSIS"],
76
+ )
77
+ parser.addini(
78
+ "doctest_encoding", "Encoding used for doctest files", default="utf-8"
79
+ )
80
+ group = parser.getgroup("collect")
81
+ group.addoption(
82
+ "--doctest-modules",
83
+ action="store_true",
84
+ default=False,
85
+ help="Run doctests in all .py modules",
86
+ dest="doctestmodules",
87
+ )
88
+ group.addoption(
89
+ "--doctest-report",
90
+ type=str.lower,
91
+ default="udiff",
92
+ help="Choose another output format for diffs on doctest failure",
93
+ choices=DOCTEST_REPORT_CHOICES,
94
+ dest="doctestreport",
95
+ )
96
+ group.addoption(
97
+ "--doctest-glob",
98
+ action="append",
99
+ default=[],
100
+ metavar="pat",
101
+ help="Doctests file matching pattern, default: test*.txt",
102
+ dest="doctestglob",
103
+ )
104
+ group.addoption(
105
+ "--doctest-ignore-import-errors",
106
+ action="store_true",
107
+ default=False,
108
+ help="Ignore doctest collection errors",
109
+ dest="doctest_ignore_import_errors",
110
+ )
111
+ group.addoption(
112
+ "--doctest-continue-on-failure",
113
+ action="store_true",
114
+ default=False,
115
+ help="For a given doctest, continue to run after the first failure",
116
+ dest="doctest_continue_on_failure",
117
+ )
118
+
119
+
120
+ def pytest_unconfigure() -> None:
121
+ global RUNNER_CLASS
122
+
123
+ RUNNER_CLASS = None
124
+
125
+
126
+ def pytest_collect_file(
127
+ file_path: Path,
128
+ parent: Collector,
129
+ ) -> DoctestModule | DoctestTextfile | None:
130
+ config = parent.config
131
+ if file_path.suffix == ".py":
132
+ if config.option.doctestmodules and not any(
133
+ (_is_setup_py(file_path), _is_main_py(file_path))
134
+ ):
135
+ return DoctestModule.from_parent(parent, path=file_path)
136
+ elif _is_doctest(config, file_path, parent):
137
+ return DoctestTextfile.from_parent(parent, path=file_path)
138
+ return None
139
+
140
+
141
+ def _is_setup_py(path: Path) -> bool:
142
+ if path.name != "setup.py":
143
+ return False
144
+ contents = path.read_bytes()
145
+ return b"setuptools" in contents or b"distutils" in contents
146
+
147
+
148
+ def _is_doctest(config: Config, path: Path, parent: Collector) -> bool:
149
+ if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path):
150
+ return True
151
+ globs = config.getoption("doctestglob") or ["test*.txt"]
152
+ return any(fnmatch_ex(glob, path) for glob in globs)
153
+
154
+
155
+ def _is_main_py(path: Path) -> bool:
156
+ return path.name == "__main__.py"
157
+
158
+
159
+ class ReprFailDoctest(TerminalRepr):
160
+ def __init__(
161
+ self, reprlocation_lines: Sequence[tuple[ReprFileLocation, Sequence[str]]]
162
+ ) -> None:
163
+ self.reprlocation_lines = reprlocation_lines
164
+
165
+ def toterminal(self, tw: TerminalWriter) -> None:
166
+ for reprlocation, lines in self.reprlocation_lines:
167
+ for line in lines:
168
+ tw.line(line)
169
+ reprlocation.toterminal(tw)
170
+
171
+
172
+ class MultipleDoctestFailures(Exception):
173
+ def __init__(self, failures: Sequence[doctest.DocTestFailure]) -> None:
174
+ super().__init__()
175
+ self.failures = failures
176
+
177
+
178
+ def _init_runner_class() -> type[doctest.DocTestRunner]:
179
+ import doctest
180
+
181
+ class PytestDoctestRunner(doctest.DebugRunner):
182
+ """Runner to collect failures.
183
+
184
+ Note that the out variable in this case is a list instead of a
185
+ stdout-like object.
186
+ """
187
+
188
+ def __init__(
189
+ self,
190
+ checker: doctest.OutputChecker | None = None,
191
+ verbose: bool | None = None,
192
+ optionflags: int = 0,
193
+ continue_on_failure: bool = True,
194
+ ) -> None:
195
+ super().__init__(checker=checker, verbose=verbose, optionflags=optionflags)
196
+ self.continue_on_failure = continue_on_failure
197
+
198
+ def report_failure(
199
+ self,
200
+ out,
201
+ test: doctest.DocTest,
202
+ example: doctest.Example,
203
+ got: str,
204
+ ) -> None:
205
+ failure = doctest.DocTestFailure(test, example, got)
206
+             if self.continue_on_failure:
+                 out.append(failure)
+             else:
+                 raise failure
+
+         def report_unexpected_exception(
+             self,
+             out,
+             test: doctest.DocTest,
+             example: doctest.Example,
+             exc_info: tuple[type[BaseException], BaseException, types.TracebackType],
+         ) -> None:
+             if isinstance(exc_info[1], OutcomeException):
+                 raise exc_info[1]
+             if isinstance(exc_info[1], bdb.BdbQuit):
+                 outcomes.exit("Quitting debugger")
+             failure = doctest.UnexpectedException(test, example, exc_info)
+             if self.continue_on_failure:
+                 out.append(failure)
+             else:
+                 raise failure
+
+     return PytestDoctestRunner
+
+
+ def _get_runner(
+     checker: doctest.OutputChecker | None = None,
+     verbose: bool | None = None,
+     optionflags: int = 0,
+     continue_on_failure: bool = True,
+ ) -> doctest.DocTestRunner:
+     # We need this in order to do a lazy import on doctest
+     global RUNNER_CLASS
+     if RUNNER_CLASS is None:
+         RUNNER_CLASS = _init_runner_class()
+     # Type ignored because the continue_on_failure argument is only defined on
+     # PytestDoctestRunner, which is lazily defined so can't be used as a type.
+     return RUNNER_CLASS(  # type: ignore
+         checker=checker,
+         verbose=verbose,
+         optionflags=optionflags,
+         continue_on_failure=continue_on_failure,
+     )
+
+
+ class DoctestItem(Item):
+     def __init__(
+         self,
+         name: str,
+         parent: DoctestTextfile | DoctestModule,
+         runner: doctest.DocTestRunner,
+         dtest: doctest.DocTest,
+     ) -> None:
+         super().__init__(name, parent)
+         self.runner = runner
+         self.dtest = dtest
+
+         # Stuff needed for fixture support.
+         self.obj = None
+         fm = self.session._fixturemanager
+         fixtureinfo = fm.getfixtureinfo(node=self, func=None, cls=None)
+         self._fixtureinfo = fixtureinfo
+         self.fixturenames = fixtureinfo.names_closure
+         self._initrequest()
+
+     @classmethod
+     def from_parent(  # type: ignore[override]
+         cls,
+         parent: DoctestTextfile | DoctestModule,
+         *,
+         name: str,
+         runner: doctest.DocTestRunner,
+         dtest: doctest.DocTest,
+     ) -> Self:
+         # incompatible signature due to imposed limits on subclass
+         """The public named constructor."""
+         return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest)
+
+     def _initrequest(self) -> None:
+         self.funcargs: dict[str, object] = {}
+         self._request = TopRequest(self, _ispytest=True)  # type: ignore[arg-type]
+
+     def setup(self) -> None:
+         self._request._fillfixtures()
+         globs = dict(getfixture=self._request.getfixturevalue)
+         for name, value in self._request.getfixturevalue("doctest_namespace").items():
+             globs[name] = value
+         self.dtest.globs.update(globs)
+
+     def runtest(self) -> None:
+         _check_all_skipped(self.dtest)
+         self._disable_output_capturing_for_darwin()
+         failures: list[doctest.DocTestFailure] = []
+         # Type ignored because we change the type of `out` from what
+         # doctest expects.
+         self.runner.run(self.dtest, out=failures)  # type: ignore[arg-type]
+         if failures:
+             raise MultipleDoctestFailures(failures)
+
+     def _disable_output_capturing_for_darwin(self) -> None:
+         """Disable output capturing. Otherwise, stdout is lost to doctest (#985)."""
+         if platform.system() != "Darwin":
+             return
+         capman = self.config.pluginmanager.getplugin("capturemanager")
+         if capman:
+             capman.suspend_global_capture(in_=True)
+             out, err = capman.read_global_capture()
+             sys.stdout.write(out)
+             sys.stderr.write(err)
+
+     # TODO: Type ignored -- breaks Liskov Substitution.
+     def repr_failure(  # type: ignore[override]
+         self,
+         excinfo: ExceptionInfo[BaseException],
+     ) -> str | TerminalRepr:
+         import doctest
+
+         failures: (
+             Sequence[doctest.DocTestFailure | doctest.UnexpectedException] | None
+         ) = None
+         if isinstance(
+             excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException)
+         ):
+             failures = [excinfo.value]
+         elif isinstance(excinfo.value, MultipleDoctestFailures):
+             failures = excinfo.value.failures
+
+         if failures is None:
+             return super().repr_failure(excinfo)
+
+         reprlocation_lines = []
+         for failure in failures:
+             example = failure.example
+             test = failure.test
+             filename = test.filename
+             if test.lineno is None:
+                 lineno = None
+             else:
+                 lineno = test.lineno + example.lineno + 1
+             message = type(failure).__name__
+             # TODO: ReprFileLocation doesn't expect a None lineno.
+             reprlocation = ReprFileLocation(filename, lineno, message)  # type: ignore[arg-type]
+             checker = _get_checker()
+             report_choice = _get_report_choice(self.config.getoption("doctestreport"))
+             if lineno is not None:
+                 assert failure.test.docstring is not None
+                 lines = failure.test.docstring.splitlines(False)
+                 # add line numbers to the left of the error message
+                 assert test.lineno is not None
+                 lines = [
+                     "%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines)
+                 ]
+                 # trim docstring error lines to 10
+                 lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
+             else:
+                 lines = [
+                     "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
+                 ]
+                 indent = ">>>"
+                 for line in example.source.splitlines():
+                     lines.append(f"??? {indent} {line}")
+                     indent = "..."
+             if isinstance(failure, doctest.DocTestFailure):
+                 lines += checker.output_difference(
+                     example, failure.got, report_choice
+                 ).split("\n")
+             else:
+                 inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info)
+                 lines += [f"UNEXPECTED EXCEPTION: {inner_excinfo.value!r}"]
+                 lines += [
+                     x.strip("\n") for x in traceback.format_exception(*failure.exc_info)
+                 ]
+             reprlocation_lines.append((reprlocation, lines))
+         return ReprFailDoctest(reprlocation_lines)
+
+     def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]:
+         return self.path, self.dtest.lineno, f"[doctest] {self.name}"
+
+
+ def _get_flag_lookup() -> dict[str, int]:
+     import doctest
+
+     return dict(
+         DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
+         DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
+         NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
+         ELLIPSIS=doctest.ELLIPSIS,
+         IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
+         COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
+         ALLOW_UNICODE=_get_allow_unicode_flag(),
+         ALLOW_BYTES=_get_allow_bytes_flag(),
+         NUMBER=_get_number_flag(),
+     )
+
+
+ def get_optionflags(config: Config) -> int:
+     optionflags_str = config.getini("doctest_optionflags")
+     flag_lookup_table = _get_flag_lookup()
+     flag_acc = 0
+     for flag in optionflags_str:
+         flag_acc |= flag_lookup_table[flag]
+     return flag_acc
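+ # Illustrative sketch (not part of this module): given an ini setting such as
+ #
+ #     [pytest]
+ #     doctest_optionflags = ELLIPSIS NUMBER
+ #
+ # get_optionflags(config) returns the bitwise OR of the corresponding flag
+ # values looked up via _get_flag_lookup(), here
+ # doctest.ELLIPSIS | _get_number_flag().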
+
+
+ def _get_continue_on_failure(config: Config) -> bool:
+     continue_on_failure: bool = config.getvalue("doctest_continue_on_failure")
+     if continue_on_failure:
+         # We need to turn this off if we use pdb since we should stop at
+         # the first failure.
+         if config.getvalue("usepdb"):
+             continue_on_failure = False
+     return continue_on_failure
+
+
+ class DoctestTextfile(Module):
+     obj = None
+
+     def collect(self) -> Iterable[DoctestItem]:
+         import doctest
+
+         # Inspired by doctest.testfile; ideally we would use it directly,
+         # but it doesn't support passing a custom checker.
+         encoding = self.config.getini("doctest_encoding")
+         text = self.path.read_text(encoding)
+         filename = str(self.path)
+         name = self.path.name
+         globs = {"__name__": "__main__"}
+
+         optionflags = get_optionflags(self.config)
+
+         runner = _get_runner(
+             verbose=False,
+             optionflags=optionflags,
+             checker=_get_checker(),
+             continue_on_failure=_get_continue_on_failure(self.config),
+         )
+
+         parser = doctest.DocTestParser()
+         test = parser.get_doctest(text, globs, name, filename, 0)
+         if test.examples:
+             yield DoctestItem.from_parent(
+                 self, name=test.name, runner=runner, dtest=test
+             )
+
+
+ def _check_all_skipped(test: doctest.DocTest) -> None:
+     """Raise pytest.skip() if all examples in the given DocTest have the SKIP
+     option set."""
+     import doctest
+
+     all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
+     if all_skipped:
+         skip("all tests skipped by +SKIP option")
+
+
+ def _is_mocked(obj: object) -> bool:
+     """Return if an object is possibly a mock object by checking the
+     existence of a highly improbable attribute."""
+     return (
+         safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None)
+         is not None
+     )
+
+
+ @contextmanager
+ def _patch_unwrap_mock_aware() -> Generator[None]:
+     """Context manager which replaces ``inspect.unwrap`` with a version
+     that's aware of mock objects and doesn't recurse into them."""
+     real_unwrap = inspect.unwrap
+
+     def _mock_aware_unwrap(
+         func: Callable[..., Any], *, stop: Callable[[Any], Any] | None = None
+     ) -> Any:
+         try:
+             if stop is None or stop is _is_mocked:
+                 return real_unwrap(func, stop=_is_mocked)
+             _stop = stop
+             return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func))
+         except Exception as e:
+             warnings.warn(
+                 f"Got {e!r} when unwrapping {func!r}. This is usually caused "
+                 "by a violation of Python's object protocol; see e.g. "
+                 "https://github.com/pytest-dev/pytest/issues/5080",
+                 PytestWarning,
+             )
+             raise
+
+     inspect.unwrap = _mock_aware_unwrap
+     try:
+         yield
+     finally:
+         inspect.unwrap = real_unwrap
+
+
+ class DoctestModule(Module):
+     def collect(self) -> Iterable[DoctestItem]:
+         import doctest
+
+         class MockAwareDocTestFinder(doctest.DocTestFinder):
+             py_ver_info_minor = sys.version_info[:2]
+             is_find_lineno_broken = (
+                 py_ver_info_minor < (3, 11)
+                 or (py_ver_info_minor == (3, 11) and sys.version_info.micro < 9)
+                 or (py_ver_info_minor == (3, 12) and sys.version_info.micro < 3)
+             )
+             if is_find_lineno_broken:
+
+                 def _find_lineno(self, obj, source_lines):
+                     """On older Pythons, doctest code does not take into account
+                     `@property`. https://github.com/python/cpython/issues/61648
+
+                     Moreover, wrapped Doctests need to be unwrapped so the correct
+                     line number is returned. #8796
+                     """
+                     if isinstance(obj, property):
+                         obj = getattr(obj, "fget", obj)
+
+                     if hasattr(obj, "__wrapped__"):
+                         # Get the main obj in case of it being wrapped
+                         obj = inspect.unwrap(obj)
+
+                     # Type ignored because this is a private function.
+                     return super()._find_lineno(  # type:ignore[misc]
+                         obj,
+                         source_lines,
+                     )
+
+             if sys.version_info < (3, 10):
+
+                 def _find(
+                     self, tests, obj, name, module, source_lines, globs, seen
+                 ) -> None:
+                     """Override _find to work around issue in stdlib.
+
+                     https://github.com/pytest-dev/pytest/issues/3456
+                     https://github.com/python/cpython/issues/69718
+                     """
+                     if _is_mocked(obj):
+                         return  # pragma: no cover
+                     with _patch_unwrap_mock_aware():
+                         # Type ignored because this is a private function.
+                         super()._find(  # type:ignore[misc]
+                             tests, obj, name, module, source_lines, globs, seen
+                         )
+
+             if sys.version_info < (3, 13):
+
+                 def _from_module(self, module, object):
+                     """`cached_property` objects are never considered a part
+                     of the 'current module'. As such they are skipped by doctest.
+                     Here we override `_from_module` to check the underlying
+                     function instead. https://github.com/python/cpython/issues/107995
+                     """
+                     if isinstance(object, functools.cached_property):
+                         object = object.func
+
+                     # Type ignored because this is a private function.
+                     return super()._from_module(module, object)  # type: ignore[misc]
+
+         try:
+             module = self.obj
+         except Collector.CollectError:
+             if self.config.getvalue("doctest_ignore_import_errors"):
+                 skip(f"unable to import module {self.path!r}")
+             else:
+                 raise
+
+         # While doctests currently don't support fixtures directly, we still
+         # need to pick up autouse fixtures.
+         self.session._fixturemanager.parsefactories(self)
+
+         # Uses internal doctest module parsing mechanism.
+         finder = MockAwareDocTestFinder()
+         optionflags = get_optionflags(self.config)
+         runner = _get_runner(
+             verbose=False,
+             optionflags=optionflags,
+             checker=_get_checker(),
+             continue_on_failure=_get_continue_on_failure(self.config),
+         )
+
+         for test in finder.find(module, module.__name__):
+             if test.examples:  # skip empty doctests
+                 yield DoctestItem.from_parent(
+                     self, name=test.name, runner=runner, dtest=test
+                 )
+
+
+ def _init_checker_class() -> type[doctest.OutputChecker]:
+     import doctest
+     import re
+
+     class LiteralsOutputChecker(doctest.OutputChecker):
+         # Based on doctest_nose_plugin.py from the nltk project
+         # (https://github.com/nltk/nltk) and on the "numtest" doctest extension
+         # by Sebastien Boisgerault (https://github.com/boisgera/numtest).
+
+         _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+         _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+         _number_re = re.compile(
+             r"""
+             (?P<number>
+               (?P<mantissa>
+                 (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
+                 |
+                 (?P<integer2> [+-]?\d+)\.
+               )
+               (?:
+                 [Ee]
+                 (?P<exponent1> [+-]?\d+)
+               )?
+               |
+               (?P<integer3> [+-]?\d+)
+               (?:
+                 [Ee]
+                 (?P<exponent2> [+-]?\d+)
+               )
+             )
+             """,
+             re.VERBOSE,
+         )
+
+         def check_output(self, want: str, got: str, optionflags: int) -> bool:
+             if super().check_output(want, got, optionflags):
+                 return True
+
+             allow_unicode = optionflags & _get_allow_unicode_flag()
+             allow_bytes = optionflags & _get_allow_bytes_flag()
+             allow_number = optionflags & _get_number_flag()
+
+             if not allow_unicode and not allow_bytes and not allow_number:
+                 return False
+
+             def remove_prefixes(regex: Pattern[str], txt: str) -> str:
+                 return re.sub(regex, r"\1\2", txt)
+
+             if allow_unicode:
+                 want = remove_prefixes(self._unicode_literal_re, want)
+                 got = remove_prefixes(self._unicode_literal_re, got)
+
+             if allow_bytes:
+                 want = remove_prefixes(self._bytes_literal_re, want)
+                 got = remove_prefixes(self._bytes_literal_re, got)
+
+             if allow_number:
+                 got = self._remove_unwanted_precision(want, got)
+
+             return super().check_output(want, got, optionflags)
+
+         def _remove_unwanted_precision(self, want: str, got: str) -> str:
+             wants = list(self._number_re.finditer(want))
+             gots = list(self._number_re.finditer(got))
+             if len(wants) != len(gots):
+                 return got
+             offset = 0
+             for w, g in zip(wants, gots):
+                 fraction: str | None = w.group("fraction")
+                 exponent: str | None = w.group("exponent1")
+                 if exponent is None:
+                     exponent = w.group("exponent2")
+                 precision = 0 if fraction is None else len(fraction)
+                 if exponent is not None:
+                     precision -= int(exponent)
+                 if float(w.group()) == approx(float(g.group()), abs=10**-precision):
+                     # They're close enough. Replace the text we actually
+                     # got with the text we want, so that it will match when we
+                     # check the string literally.
+                     got = (
+                         got[: g.start() + offset] + w.group() + got[g.end() + offset :]
+                     )
+                     offset += w.end() - w.start() - (g.end() - g.start())
+             return got
+
+     return LiteralsOutputChecker
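+ # Illustrative sketch (not part of this module) of what the NUMBER flag
+ # enables via _remove_unwanted_precision(): a doctest expecting
+ #
+ #     >>> math.pi
+ #     3.14
+ #
+ # matches output "3.141592653589793", because the actual value is within
+ # approx(3.14, abs=10**-2) of the literal, so the got-text is rewritten to
+ # the wanted text before the literal string comparison.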
+
+
+ def _get_checker() -> doctest.OutputChecker:
+     """Return a doctest.OutputChecker subclass that supports some
+     additional options:
+
+     * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
+       prefixes (respectively) in string literals. Useful when the same
+       doctest should run in Python 2 and Python 3.
+
+     * NUMBER to ignore floating-point differences smaller than the
+       precision of the literal number in the doctest.
+
+     An inner class is used to avoid importing "doctest" at the module
+     level.
+     """
+     global CHECKER_CLASS
+     if CHECKER_CLASS is None:
+         CHECKER_CLASS = _init_checker_class()
+     return CHECKER_CLASS()
+
+
+ def _get_allow_unicode_flag() -> int:
+     """Register and return the ALLOW_UNICODE flag."""
+     import doctest
+
+     return doctest.register_optionflag("ALLOW_UNICODE")
+
+
+ def _get_allow_bytes_flag() -> int:
+     """Register and return the ALLOW_BYTES flag."""
+     import doctest
+
+     return doctest.register_optionflag("ALLOW_BYTES")
+
+
+ def _get_number_flag() -> int:
+     """Register and return the NUMBER flag."""
+     import doctest
+
+     return doctest.register_optionflag("NUMBER")
+
+
+ def _get_report_choice(key: str) -> int:
+     """Return the actual `doctest` module flag value.
+
+     We want to do it as late as possible to avoid importing `doctest` and all
+     its dependencies when parsing options, as it adds overhead and breaks tests.
+     """
+     import doctest
+
+     return {
+         DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
+         DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
+         DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
+         DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
+         DOCTEST_REPORT_CHOICE_NONE: 0,
+     }[key]
+
+
+ @fixture(scope="session")
+ def doctest_namespace() -> dict[str, Any]:
+     """Fixture that returns a :py:class:`dict` that will be injected into the
+     namespace of doctests.
+
+     Usually this fixture is used in conjunction with another ``autouse`` fixture:
+
+     .. code-block:: python
+
+         @pytest.fixture(autouse=True)
+         def add_np(doctest_namespace):
+             doctest_namespace["np"] = numpy
+
+     For more details: :ref:`doctest_namespace`.
+     """
+     return dict()
.venv/lib/python3.11/site-packages/_pytest/helpconfig.py ADDED
@@ -0,0 +1,276 @@
+ # mypy: allow-untyped-defs
+ """Version info, help messages, tracing configuration."""
+
+ from __future__ import annotations
+
+ from argparse import Action
+ import os
+ import sys
+ from typing import Generator
+
+ from _pytest.config import Config
+ from _pytest.config import ExitCode
+ from _pytest.config import PrintHelp
+ from _pytest.config.argparsing import Parser
+ from _pytest.terminal import TerminalReporter
+ import pytest
+
+
+ class HelpAction(Action):
+     """An argparse Action that will raise an exception in order to skip the
+     rest of the argument parsing when --help is passed.
+
+     This prevents argparse from quitting due to missing required arguments
+     when any are defined, for example by ``pytest_addoption``.
+     This is similar to the way that the builtin argparse --help option is
+     implemented by raising SystemExit.
+     """
+
+     def __init__(self, option_strings, dest=None, default=False, help=None):
+         super().__init__(
+             option_strings=option_strings,
+             dest=dest,
+             const=True,
+             default=default,
+             nargs=0,
+             help=help,
+         )
+
+     def __call__(self, parser, namespace, values, option_string=None):
+         setattr(namespace, self.dest, self.const)
+
+         # We should only skip the rest of the parsing after preparse is done.
+         if getattr(parser._parser, "after_preparse", False):
+             raise PrintHelp
+
+
+ def pytest_addoption(parser: Parser) -> None:
+     group = parser.getgroup("debugconfig")
+     group.addoption(
+         "--version",
+         "-V",
+         action="count",
+         default=0,
+         dest="version",
+         help="Display pytest version and information about plugins. "
+         "When given twice, also display information about plugins.",
+     )
+     group._addoption(
+         "-h",
+         "--help",
+         action=HelpAction,
+         dest="help",
+         help="Show help message and configuration info",
+     )
+     group._addoption(
+         "-p",
+         action="append",
+         dest="plugins",
+         default=[],
+         metavar="name",
+         help="Early-load given plugin module name or entry point (multi-allowed). "
+         "To avoid loading of plugins, use the `no:` prefix, e.g. "
+         "`no:doctest`.",
+     )
+     group.addoption(
+         "--traceconfig",
+         "--trace-config",
+         action="store_true",
+         default=False,
+         help="Trace considerations of conftest.py files",
+     )
+     group.addoption(
+         "--debug",
+         action="store",
+         nargs="?",
+         const="pytestdebug.log",
+         dest="debug",
+         metavar="DEBUG_FILE_NAME",
+         help="Store internal tracing debug information in this log file. "
+         "This file is opened with 'w' and truncated as a result, care advised. "
+         "Default: pytestdebug.log.",
+     )
+     group._addoption(
+         "-o",
+         "--override-ini",
+         dest="override_ini",
+         action="append",
+         help='Override ini option with "option=value" style, '
+         "e.g. `-o xfail_strict=True -o cache_dir=cache`.",
+     )
+
+
+ @pytest.hookimpl(wrapper=True)
+ def pytest_cmdline_parse() -> Generator[None, Config, Config]:
+     config = yield
+
+     if config.option.debug:
+         # --debug | --debug <file.log> was provided.
+         path = config.option.debug
+         debugfile = open(path, "w", encoding="utf-8")
+         debugfile.write(
+             "versions pytest-{}, "
+             "python-{}\ninvocation_dir={}\ncwd={}\nargs={}\n\n".format(
+                 pytest.__version__,
+                 ".".join(map(str, sys.version_info)),
+                 config.invocation_params.dir,
+                 os.getcwd(),
+                 config.invocation_params.args,
+             )
+         )
+         config.trace.root.setwriter(debugfile.write)
+         undo_tracing = config.pluginmanager.enable_tracing()
+         sys.stderr.write(f"writing pytest debug information to {path}\n")
+
+         def unset_tracing() -> None:
+             debugfile.close()
+             sys.stderr.write(f"wrote pytest debug information to {debugfile.name}\n")
+             config.trace.root.setwriter(None)
+             undo_tracing()
+
+         config.add_cleanup(unset_tracing)
+
+     return config
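+ # Illustrative usage (assumption: invoked from a shell):
+ #
+ #     pytest --debug                 # traces into pytestdebug.log (the const above)
+ #     pytest --debug my-trace.log    # traces into a custom file
+ #
+ # The file is opened relative to the current working directory and is closed
+ # by the unset_tracing() cleanup registered above.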
+
+
+ def showversion(config: Config) -> None:
+     if config.option.version > 1:
+         sys.stdout.write(
+             f"This is pytest version {pytest.__version__}, imported from {pytest.__file__}\n"
+         )
+         plugininfo = getpluginversioninfo(config)
+         if plugininfo:
+             for line in plugininfo:
+                 sys.stdout.write(line + "\n")
+     else:
+         sys.stdout.write(f"pytest {pytest.__version__}\n")
+
+
+ def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
+     if config.option.version > 0:
+         showversion(config)
+         return 0
+     elif config.option.help:
+         config._do_configure()
+         showhelp(config)
+         config._ensure_unconfigure()
+         return 0
+     return None
+
+
+ def showhelp(config: Config) -> None:
+     import textwrap
+
+     reporter: TerminalReporter | None = config.pluginmanager.get_plugin(
+         "terminalreporter"
+     )
+     assert reporter is not None
+     tw = reporter._tw
+     tw.write(config._parser.optparser.format_help())
+     tw.line()
+     tw.line(
+         "[pytest] ini-options in the first "
+         "pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:"
+     )
+     tw.line()
+
+     columns = tw.fullwidth  # costly call
+     indent_len = 24  # based on argparse's max_help_position=24
+     indent = " " * indent_len
+     for name in config._parser._ininames:
+         help, type, default = config._parser._inidict[name]
+         if type is None:
+             type = "string"
+         if help is None:
+             raise TypeError(f"help argument cannot be None for {name}")
+         spec = f"{name} ({type}):"
+         tw.write(f"  {spec}")
+         spec_len = len(spec)
+         if spec_len > (indent_len - 3):
+             # Display help starting at a new line.
+             tw.line()
+             helplines = textwrap.wrap(
+                 help,
+                 columns,
+                 initial_indent=indent,
+                 subsequent_indent=indent,
+                 break_on_hyphens=False,
+             )
+
+             for line in helplines:
+                 tw.line(line)
+         else:
+             # Display help starting after the spec, following lines indented.
+             tw.write(" " * (indent_len - spec_len - 2))
+             wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False)
+
+             if wrapped:
+                 tw.line(wrapped[0])
+                 for line in wrapped[1:]:
+                     tw.line(indent + line)
+
+     tw.line()
+     tw.line("Environment variables:")
+     vars = [
+         (
+             "CI",
+             "When set (regardless of value), pytest knows it is running in a "
+             "CI process and does not truncate summary info",
+         ),
+         ("BUILD_NUMBER", "Equivalent to CI"),
+         ("PYTEST_ADDOPTS", "Extra command line options"),
+         ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"),
+         ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"),
+         ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"),
+     ]
+     for name, help in vars:
+         tw.line(f"  {name:<24} {help}")
+     tw.line()
+     tw.line()
+
+     tw.line("to see available markers type: pytest --markers")
+     tw.line("to see available fixtures type: pytest --fixtures")
+     tw.line(
+         "(shown according to specified file_or_dir or current dir "
+         "if not specified; fixtures with leading '_' are only shown "
+         "with the '-v' option"
+     )
+
+     for warningreport in reporter.stats.get("warnings", []):
+         tw.line("warning : " + warningreport.message, red=True)
+
+
+ conftest_options = [("pytest_plugins", "list of plugin names to load")]
+
+
+ def getpluginversioninfo(config: Config) -> list[str]:
+     lines = []
+     plugininfo = config.pluginmanager.list_plugin_distinfo()
+     if plugininfo:
+         lines.append("registered third-party plugins:")
+         for plugin, dist in plugininfo:
+             loc = getattr(plugin, "__file__", repr(plugin))
+             content = f"{dist.project_name}-{dist.version} at {loc}"
+             lines.append("  " + content)
+     return lines
+
+
+ def pytest_report_header(config: Config) -> list[str]:
+     lines = []
+     if config.option.debug or config.option.traceconfig:
+         lines.append(f"using: pytest-{pytest.__version__}")
+
+         verinfo = getpluginversioninfo(config)
+         if verinfo:
+             lines.extend(verinfo)
+
+     if config.option.traceconfig:
+         lines.append("active plugins:")
+         items = config.pluginmanager.list_name_plugin()
+         for name, plugin in items:
+             if hasattr(plugin, "__file__"):
+                 r = plugin.__file__
+             else:
+                 r = repr(plugin)
+             lines.append(f"    {name:<20}: {r}")
+     return lines
.venv/lib/python3.11/site-packages/_pytest/hookspec.py ADDED
@@ -0,0 +1,1333 @@
+ # mypy: allow-untyped-defs
+ # ruff: noqa: T100
+ """Hook specifications for pytest plugins which are invoked by pytest itself
+ and by builtin plugins."""
+
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Any
+ from typing import Mapping
+ from typing import Sequence
+ from typing import TYPE_CHECKING
+
+ from pluggy import HookspecMarker
+
+ from .deprecated import HOOK_LEGACY_PATH_ARG
+
+
+ if TYPE_CHECKING:
+     import pdb
+     from typing import Literal
+     import warnings
+
+     from _pytest._code.code import ExceptionInfo
+     from _pytest._code.code import ExceptionRepr
+     from _pytest.compat import LEGACY_PATH
+     from _pytest.config import _PluggyPlugin
+     from _pytest.config import Config
+     from _pytest.config import ExitCode
+     from _pytest.config import PytestPluginManager
+     from _pytest.config.argparsing import Parser
+     from _pytest.fixtures import FixtureDef
+     from _pytest.fixtures import SubRequest
+     from _pytest.main import Session
+     from _pytest.nodes import Collector
+     from _pytest.nodes import Item
+     from _pytest.outcomes import Exit
+     from _pytest.python import Class
+     from _pytest.python import Function
+     from _pytest.python import Metafunc
+     from _pytest.python import Module
+     from _pytest.reports import CollectReport
+     from _pytest.reports import TestReport
+     from _pytest.runner import CallInfo
+     from _pytest.terminal import TerminalReporter
+     from _pytest.terminal import TestShortLogReport
+
+
+ hookspec = HookspecMarker("pytest")
+
+ # -------------------------------------------------------------------------
+ # Initialization hooks called for every plugin
+ # -------------------------------------------------------------------------
+
+
+ @hookspec(historic=True)
+ def pytest_addhooks(pluginmanager: PytestPluginManager) -> None:
+     """Called at plugin registration time to allow adding new hooks via a call to
+     :func:`pluginmanager.add_hookspecs(module_or_class, prefix) <pytest.PytestPluginManager.add_hookspecs>`.
+
+     :param pluginmanager: The pytest plugin manager.
+
+     .. note::
+         This hook is incompatible with hook wrappers.
+
+     Use in conftest plugins
+     =======================
+
+     If a conftest plugin implements this hook, it will be called immediately
+     when the conftest is registered.
+     """
+
+
+ @hookspec(historic=True)
+ def pytest_plugin_registered(
+     plugin: _PluggyPlugin,
+     plugin_name: str,
+     manager: PytestPluginManager,
+ ) -> None:
+     """A new pytest plugin got registered.
+
+     :param plugin: The plugin module or instance.
+     :param plugin_name: The name by which the plugin is registered.
+     :param manager: The pytest plugin manager.
+
+     .. note::
+         This hook is incompatible with hook wrappers.
+
+     Use in conftest plugins
+     =======================
+
+     If a conftest plugin implements this hook, it will be called immediately
+     when the conftest is registered, once for each plugin registered thus far
+     (including itself!), and for all plugins thereafter when they are
+     registered.
+     """
+
+
+ @hookspec(historic=True)
+ def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager) -> None:
+     """Register argparse-style options and ini-style config values,
+     called once at the beginning of a test run.
+
+     :param parser:
+         To add command line options, call
+         :py:func:`parser.addoption(...) <pytest.Parser.addoption>`.
+         To add ini-file values call :py:func:`parser.addini(...)
+         <pytest.Parser.addini>`.
+
+     :param pluginmanager:
+         The pytest plugin manager, which can be used to install :py:func:`~pytest.hookspec`'s
+         or :py:func:`~pytest.hookimpl`'s and allow one plugin to call another plugin's hooks
+         to change how command line options are added.
+
+     Options can later be accessed through the
+     :py:class:`config <pytest.Config>` object, respectively:
+
+     - :py:func:`config.getoption(name) <pytest.Config.getoption>` to
+       retrieve the value of a command line option.
+
+     - :py:func:`config.getini(name) <pytest.Config.getini>` to retrieve
+       a value read from an ini-style file.
+
+     The config object is passed around on many internal objects via the ``.config``
+     attribute or can be retrieved as the ``pytestconfig`` fixture.
+
+     .. note::
+         This hook is incompatible with hook wrappers.
+
+     Use in conftest plugins
+     =======================
+
+     If a conftest plugin implements this hook, it will be called immediately
+     when the conftest is registered.
+
+     This hook is only called for :ref:`initial conftests <pluginorder>`.
+     """
+
+
+ @hookspec(historic=True)
+ def pytest_configure(config: Config) -> None:
+     """Allow plugins and conftest files to perform initial configuration.
+
+     .. note::
+         This hook is incompatible with hook wrappers.
+
+     :param config: The pytest config object.
+
+     Use in conftest plugins
+     =======================
+
+     This hook is called for every :ref:`initial conftest <pluginorder>` file
+     after command line options have been parsed. After that, the hook is called
+     for other conftest files as they are registered.
+     """
+
+
+ # -------------------------------------------------------------------------
+ # Bootstrapping hooks called for plugins registered early enough:
+ # internal and 3rd party plugins.
+ # -------------------------------------------------------------------------
+
+
+ @hookspec(firstresult=True)
+ def pytest_cmdline_parse(
+     pluginmanager: PytestPluginManager, args: list[str]
+ ) -> Config | None:
+     """Return an initialized :class:`~pytest.Config`, parsing the specified args.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     .. note::
+         This hook is only called for plugin classes passed to the
+         ``plugins`` arg when using `pytest.main`_ to perform an in-process
+         test run.
+
+     :param pluginmanager: The pytest plugin manager.
+     :param args: List of arguments passed on the command line.
+     :returns: A pytest config object.
+
+     Use in conftest plugins
+     =======================
+
+     This hook is not called for conftest files.
+     """
+
+
+ def pytest_load_initial_conftests(
+     early_config: Config, parser: Parser, args: list[str]
+ ) -> None:
+     """Called to implement the loading of :ref:`initial conftest files
+     <pluginorder>` ahead of command line option parsing.
+
+     :param early_config: The pytest config object.
+     :param args: Arguments passed on the command line.
+     :param parser: To add command line options.
+
+     Use in conftest plugins
+     =======================
+
+     This hook is not called for conftest files.
+     """
+
+
+ @hookspec(firstresult=True)
+ def pytest_cmdline_main(config: Config) -> ExitCode | int | None:
+     """Called for performing the main command line action.
+
+     The default implementation will invoke the configure hooks and
+     :hook:`pytest_runtestloop`.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     :param config: The pytest config object.
+     :returns: The exit code.
+
+     Use in conftest plugins
+     =======================
+
+     This hook is only called for :ref:`initial conftests <pluginorder>`.
+     """
+
+
+ # -------------------------------------------------------------------------
+ # collection hooks
+ # -------------------------------------------------------------------------
+
+
+ @hookspec(firstresult=True)
+ def pytest_collection(session: Session) -> object | None:
+     """Perform the collection phase for the given session.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+     The return value is not used, but only stops further processing.
+
+     The default collection phase is this (see individual hooks for full details):
+
+     1. Starting from ``session`` as the initial collector:
+
+        1. ``pytest_collectstart(collector)``
+        2. ``report = pytest_make_collect_report(collector)``
+        3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred
+        4. For each collected node:
+
+           1. If an item, ``pytest_itemcollected(item)``
+           2. If a collector, recurse into it.
+
+        5. ``pytest_collectreport(report)``
+
+     2. ``pytest_collection_modifyitems(session, config, items)``
+
+        1. ``pytest_deselected(items)`` for any deselected items (may be called multiple times)
+
+     3. ``pytest_collection_finish(session)``
+     4. Set ``session.items`` to the list of collected items
+     5. Set ``session.testscollected`` to the number of collected items
+
+     You can implement this hook to only perform some action before collection,
+     for example the terminal plugin uses it to start displaying the collection
+     counter (and returns `None`).
+
+     :param session: The pytest session object.
+
+     Use in conftest plugins
+     =======================
+
+     This hook is only called for :ref:`initial conftests <pluginorder>`.
+     """
+
+
+ def pytest_collection_modifyitems(
+     session: Session, config: Config, items: list[Item]
+ ) -> None:
+     """Called after collection has been performed. May filter or re-order
+     the items in-place.
+
+     When items are deselected (filtered out from ``items``),
+     the hook :hook:`pytest_deselected` must be called explicitly
+     with the deselected items to properly notify other plugins,
+     e.g. with ``config.hook.pytest_deselected(deselected_items)``.
+
+     :param session: The pytest session object.
+     :param config: The pytest config object.
+     :param items: List of item objects.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest plugin can implement this hook.
+     """
+
+
+ def pytest_collection_finish(session: Session) -> None:
+     """Called after collection has been performed and modified.
+
+     :param session: The pytest session object.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest plugin can implement this hook.
+     """
+
+
+ @hookspec(
+     firstresult=True,
+     warn_on_impl_args={
+         "path": HOOK_LEGACY_PATH_ARG.format(
+             pylib_path_arg="path", pathlib_path_arg="collection_path"
+         ),
+     },
+ )
+ def pytest_ignore_collect(
+     collection_path: Path, path: LEGACY_PATH, config: Config
+ ) -> bool | None:
+     """Return ``True`` to ignore this path for collection.
+
+     Return ``None`` to let other plugins ignore the path for collection.
+
+     Returning ``False`` will forcefully *not* ignore this path for collection,
+     without giving a chance for other plugins to ignore this path.
+
+     This hook is consulted for all files and directories prior to calling
+     more specific hooks.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     :param collection_path: The path to analyze.
+     :type collection_path: pathlib.Path
+     :param path: The path to analyze (deprecated).
+     :param config: The pytest config object.
+
+     .. versionchanged:: 7.0.0
+         The ``collection_path`` parameter was added as a :class:`pathlib.Path`
+         equivalent of the ``path`` parameter. The ``path`` parameter
+         has been deprecated.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given collection path, only
+     conftest files in parent directories of the collection path are consulted
+     (if the path is a directory, its own conftest file is *not* consulted - a
+     directory cannot ignore itself!).
+     """
+
+
+ @hookspec(firstresult=True)
+ def pytest_collect_directory(path: Path, parent: Collector) -> Collector | None:
+     """Create a :class:`~pytest.Collector` for the given directory, or None if
+     not relevant.
+
+     .. versionadded:: 8.0
+
+     For best results, the returned collector should be a subclass of
+     :class:`~pytest.Directory`, but this is not required.
+
+     The new node needs to have the specified ``parent`` as a parent.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     :param path: The path to analyze.
+     :type path: pathlib.Path
+
+     See :ref:`custom directory collectors` for a simple example of use of this
+     hook.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given collection path, only
+     conftest files in parent directories of the collection path are consulted
+     (if the path is a directory, its own conftest file is *not* consulted - a
+     directory cannot collect itself!).
+     """
+
+
+ @hookspec(
+     warn_on_impl_args={
+         "path": HOOK_LEGACY_PATH_ARG.format(
+             pylib_path_arg="path", pathlib_path_arg="file_path"
+         ),
+     },
+ )
+ def pytest_collect_file(
+     file_path: Path, path: LEGACY_PATH, parent: Collector
+ ) -> Collector | None:
+     """Create a :class:`~pytest.Collector` for the given path, or None if not relevant.
+
+     For best results, the returned collector should be a subclass of
+     :class:`~pytest.File`, but this is not required.
+
+     The new node needs to have the specified ``parent`` as a parent.
+
+     :param file_path: The path to analyze.
+     :type file_path: pathlib.Path
+     :param path: The path to collect (deprecated).
+
+     .. versionchanged:: 7.0.0
+         The ``file_path`` parameter was added as a :class:`pathlib.Path`
+         equivalent of the ``path`` parameter. The ``path`` parameter
+         has been deprecated.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given file path, only
+     conftest files in parent directories of the file path are consulted.
+     """
+
+
+ # logging hooks for collection
+
+
+ def pytest_collectstart(collector: Collector) -> None:
+     """Collector starts collecting.
+
+     :param collector:
+         The collector.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given collector, only
+     conftest files in the collector's directory and its parent directories are
+     consulted.
+     """
+
+
+ def pytest_itemcollected(item: Item) -> None:
+     """We just collected a test item.
+
+     :param item:
+         The item.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given item, only conftest
+     files in the item's directory and its parent directories are consulted.
+     """
+
+
+ def pytest_collectreport(report: CollectReport) -> None:
+     """Collector finished collecting.
+
+     :param report:
+         The collect report.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given collector, only
+     conftest files in the collector's directory and its parent directories are
+     consulted.
+     """
+
+
+ def pytest_deselected(items: Sequence[Item]) -> None:
+     """Called for deselected test items, e.g. by keyword.
+
+     Note that this hook has two integration aspects for plugins:
+
+     - it can be *implemented* to be notified of deselected items
+     - it must be *called* from :hook:`pytest_collection_modifyitems`
+       implementations when items are deselected (to properly notify other plugins).
+
+     May be called multiple times.
+
+     :param items:
+         The items.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook.
+     """
+
+
+ @hookspec(firstresult=True)
+ def pytest_make_collect_report(collector: Collector) -> CollectReport | None:
+     """Perform :func:`collector.collect() <pytest.Collector.collect>` and return
+     a :class:`~pytest.CollectReport`.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     :param collector:
+         The collector.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given collector, only
+     conftest files in the collector's directory and its parent directories are
+     consulted.
+     """
+
+
+ # -------------------------------------------------------------------------
+ # Python test function related hooks
+ # -------------------------------------------------------------------------
+
+
+ @hookspec(
+     firstresult=True,
+     warn_on_impl_args={
+         "path": HOOK_LEGACY_PATH_ARG.format(
+             pylib_path_arg="path", pathlib_path_arg="module_path"
+         ),
+     },
+ )
+ def pytest_pycollect_makemodule(
+     module_path: Path, path: LEGACY_PATH, parent
+ ) -> Module | None:
+     """Return a :class:`pytest.Module` collector or None for the given path.
+
+     This hook will be called for each matching test module path.
+     The :hook:`pytest_collect_file` hook needs to be used if you want to
+     create test modules for files that do not match as a test module.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     :param module_path: The path of the module to collect.
+     :type module_path: pathlib.Path
+     :param path: The path of the module to collect (deprecated).
+
+     .. versionchanged:: 7.0.0
+         The ``module_path`` parameter was added as a :class:`pathlib.Path`
+         equivalent of the ``path`` parameter.
+
+         The ``path`` parameter has been deprecated in favor of ``fspath``.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given parent collector,
+     only conftest files in the collector's directory and its parent directories
+     are consulted.
+     """
+
+
+ @hookspec(firstresult=True)
+ def pytest_pycollect_makeitem(
+     collector: Module | Class, name: str, obj: object
+ ) -> None | Item | Collector | list[Item | Collector]:
+     """Return a custom item/collector for a Python object in a module, or None.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     :param collector:
+         The module/class collector.
+     :param name:
+         The name of the object in the module/class.
+     :param obj:
+         The object.
+     :returns:
+         The created items/collectors.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given collector, only
+     conftest files in the collector's directory and its parent directories
+     are consulted.
+     """
+
+
+ @hookspec(firstresult=True)
+ def pytest_pyfunc_call(pyfuncitem: Function) -> object | None:
+     """Call underlying test function.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     :param pyfuncitem:
+         The function item.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given item, only
+     conftest files in the item's directory and its parent directories
+     are consulted.
+     """
+
+
+ def pytest_generate_tests(metafunc: Metafunc) -> None:
+     """Generate (multiple) parametrized calls to a test function.
+
+     :param metafunc:
+         The :class:`~pytest.Metafunc` helper for the test function.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given function definition,
+     only conftest files in the function's directory and its parent directories
+     are consulted.
+     """
+
+
+ @hookspec(firstresult=True)
+ def pytest_make_parametrize_id(config: Config, val: object, argname: str) -> str | None:
+     """Return a user-friendly string representation of the given ``val``
+     that will be used by @pytest.mark.parametrize calls, or None if the hook
+     doesn't know about ``val``.
+
+     The parameter name is available as ``argname``, if required.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     :param config: The pytest config object.
+     :param val: The parametrized value.
+     :param argname: The automatic parameter name produced by pytest.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook.
+     """
+
+
+ # -------------------------------------------------------------------------
+ # runtest related hooks
+ # -------------------------------------------------------------------------
+
+
+ @hookspec(firstresult=True)
+ def pytest_runtestloop(session: Session) -> object | None:
+     """Perform the main runtest loop (after collection finished).
+
+     The default hook implementation performs the runtest protocol for all items
+     collected in the session (``session.items``), unless the collection failed
+     or the ``collectonly`` pytest option is set.
+
+     If at any point :py:func:`pytest.exit` is called, the loop is
+     terminated immediately.
+
+     If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the
+     loop is terminated after the runtest protocol for the current item is finished.
+
+     :param session: The pytest session object.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+     The return value is not used, but only stops further processing.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook.
+     """
+
+
+ @hookspec(firstresult=True)
+ def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> object | None:
+     """Perform the runtest protocol for a single test item.
+
+     The default runtest protocol is this (see individual hooks for full details):
+
+     - ``pytest_runtest_logstart(nodeid, location)``
+
+     - Setup phase:
+         - ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``)
+         - ``report = pytest_runtest_makereport(item, call)``
+         - ``pytest_runtest_logreport(report)``
+         - ``pytest_exception_interact(call, report)`` if an interactive exception occurred
+
+     - Call phase, if the setup passed and the ``setuponly`` pytest option is not set:
+         - ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``)
+         - ``report = pytest_runtest_makereport(item, call)``
+         - ``pytest_runtest_logreport(report)``
+         - ``pytest_exception_interact(call, report)`` if an interactive exception occurred
+
+     - Teardown phase:
+         - ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``)
+         - ``report = pytest_runtest_makereport(item, call)``
+         - ``pytest_runtest_logreport(report)``
+         - ``pytest_exception_interact(call, report)`` if an interactive exception occurred
+
+     - ``pytest_runtest_logfinish(nodeid, location)``
+
+     :param item: Test item for which the runtest protocol is performed.
+     :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend).
+
+     Stops at first non-None result, see :ref:`firstresult`.
+     The return value is not used, but only stops further processing.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook.
+     """
+
+
+ def pytest_runtest_logstart(nodeid: str, location: tuple[str, int | None, str]) -> None:
+     """Called at the start of running the runtest protocol for a single item.
+
+     See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
+
+     :param nodeid: Full node ID of the item.
+     :param location: A tuple of ``(filename, lineno, testname)``
+         where ``filename`` is a file path relative to ``config.rootpath``
+         and ``lineno`` is 0-based.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given item, only conftest
+     files in the item's directory and its parent directories are consulted.
+     """
+
+
+ def pytest_runtest_logfinish(
+     nodeid: str, location: tuple[str, int | None, str]
+ ) -> None:
+     """Called at the end of running the runtest protocol for a single item.
+
+     See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
+
+     :param nodeid: Full node ID of the item.
+     :param location: A tuple of ``(filename, lineno, testname)``
+         where ``filename`` is a file path relative to ``config.rootpath``
+         and ``lineno`` is 0-based.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given item, only conftest
+     files in the item's directory and its parent directories are consulted.
+     """
+
+
+ def pytest_runtest_setup(item: Item) -> None:
+     """Called to perform the setup phase for a test item.
+
+     The default implementation runs ``setup()`` on ``item`` and all of its
+     parents (which haven't been setup yet). This includes obtaining the
+     values of fixtures required by the item (which haven't been obtained
+     yet).
+
+     :param item:
+         The item.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given item, only conftest
+     files in the item's directory and its parent directories are consulted.
+     """
+
+
+ def pytest_runtest_call(item: Item) -> None:
+     """Called to run the test for test item (the call phase).
+
+     The default implementation calls ``item.runtest()``.
+
+     :param item:
+         The item.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given item, only conftest
+     files in the item's directory and its parent directories are consulted.
+     """
+
+
+ def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None:
+     """Called to perform the teardown phase for a test item.
+
+     The default implementation runs the finalizers and calls ``teardown()``
+     on ``item`` and all of its parents (which need to be torn down). This
+     includes running the teardown phase of fixtures required by the item (if
+     they go out of scope).
+
+     :param item:
+         The item.
+     :param nextitem:
+         The scheduled-to-be-next test item (None if no further test item is
+         scheduled). This argument is used to perform exact teardowns, i.e.
+         calling just enough finalizers so that nextitem only needs to call
+         setup functions.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given item, only conftest
+     files in the item's directory and its parent directories are consulted.
+     """
+
+
+ @hookspec(firstresult=True)
+ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport | None:
+     """Called to create a :class:`~pytest.TestReport` for each of
+     the setup, call and teardown runtest phases of a test item.
+
+     See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
+
+     :param item: The item.
+     :param call: The :class:`~pytest.CallInfo` for the phase.
+
+     Stops at first non-None result, see :ref:`firstresult`.
+
+     Use in conftest plugins
+     =======================
+
+     Any conftest file can implement this hook. For a given item, only conftest
+     files in the item's directory and its parent directories are consulted.
+     """
809
+
810
+
811
+ def pytest_runtest_logreport(report: TestReport) -> None:
812
+ """Process the :class:`~pytest.TestReport` produced for each
813
+ of the setup, call and teardown runtest phases of an item.
814
+
815
+ See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
816
+
817
+ Use in conftest plugins
818
+ =======================
819
+
820
+ Any conftest file can implement this hook. For a given item, only conftest
821
+ files in the item's directory and its parent directories are consulted.
822
+ """
823
+
824
+
825
+ @hookspec(firstresult=True)
826
+ def pytest_report_to_serializable(
827
+ config: Config,
828
+ report: CollectReport | TestReport,
829
+ ) -> dict[str, Any] | None:
830
+ """Serialize the given report object into a data structure suitable for
831
+ sending over the wire, e.g. converted to JSON.
832
+
833
+ :param config: The pytest config object.
834
+ :param report: The report.
835
+
836
+ Use in conftest plugins
837
+ =======================
838
+
839
+ Any conftest file can implement this hook. The exact details may depend
840
+ on the plugin which calls the hook.
841
+ """
842
+
843
+
844
+ @hookspec(firstresult=True)
845
+ def pytest_report_from_serializable(
846
+ config: Config,
847
+ data: dict[str, Any],
848
+ ) -> CollectReport | TestReport | None:
849
+ """Restore a report object previously serialized with
850
+ :hook:`pytest_report_to_serializable`.
851
+
852
+ :param config: The pytest config object.
853
+
854
+ Use in conftest plugins
855
+ =======================
856
+
857
+ Any conftest file can implement this hook. The exact details may depend
858
+ on the plugin which calls the hook.
859
+ """
860
+
861
+
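# Illustrative sketch of how a distribution-style plugin might round-trip
# a report through the two hooks above (pytest-xdist does something
# similar); the helper name is hypothetical.
import pytest


def roundtrip_report(
    config: pytest.Config, report: pytest.TestReport
) -> pytest.TestReport:
    data = config.hook.pytest_report_to_serializable(config=config, report=report)
    # `data` is JSON-serializable and can be sent across process boundaries.
    return config.hook.pytest_report_from_serializable(config=config, data=data)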
862
+ # -------------------------------------------------------------------------
863
+ # Fixture related hooks
864
+ # -------------------------------------------------------------------------
865
+
866
+
867
+ @hookspec(firstresult=True)
868
+ def pytest_fixture_setup(
869
+ fixturedef: FixtureDef[Any], request: SubRequest
870
+ ) -> object | None:
871
+ """Perform fixture setup execution.
872
+
873
+ :param fixturedef:
874
+ The fixture definition object.
875
+ :param request:
876
+ The fixture request object.
877
+ :returns:
878
+ The return value of the call to the fixture function.
879
+
880
+ Stops at first non-None result, see :ref:`firstresult`.
881
+
882
+ .. note::
883
+ If the fixture function returns None, other implementations of
884
+ this hook function will continue to be called, according to the
885
+ behavior of the :ref:`firstresult` option.
886
+
887
+ Use in conftest plugins
888
+ =======================
889
+
890
+ Any conftest file can implement this hook. For a given fixture, only
891
+ conftest files in the fixture scope's directory and its parent directories
892
+ are consulted.
893
+ """
894
+
895
+
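# Illustrative conftest.py sketch: time every fixture setup by wrapping
# this hook (new-style wrapper protocol of recent pytest/pluggy).
import time

import pytest


@pytest.hookimpl(wrapper=True)
def pytest_fixture_setup(fixturedef, request):
    start = time.monotonic()
    result = yield  # the fixture function's return value
    print(f"fixture {fixturedef.argname!r} set up in {time.monotonic() - start:.3f}s")
    return result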
896
+ def pytest_fixture_post_finalizer(
897
+ fixturedef: FixtureDef[Any], request: SubRequest
898
+ ) -> None:
899
+ """Called after fixture teardown, but before the cache is cleared, so
900
+ the fixture result ``fixturedef.cached_result`` is still available (not
901
+ ``None``).
902
+
903
+ :param fixturedef:
904
+ The fixture definition object.
905
+ :param request:
906
+ The fixture request object.
907
+
908
+ Use in conftest plugins
909
+ =======================
910
+
911
+ Any conftest file can implement this hook. For a given fixture, only
912
+ conftest files in the fixture scope's directory and its parent directories
913
+ are consulted.
914
+ """
915
+
916
+
917
+ # -------------------------------------------------------------------------
918
+ # test session related hooks
919
+ # -------------------------------------------------------------------------
920
+
921
+
922
+ def pytest_sessionstart(session: Session) -> None:
923
+ """Called after the ``Session`` object has been created and before performing collection
924
+ and entering the run test loop.
925
+
926
+ :param session: The pytest session object.
927
+
928
+ Use in conftest plugins
929
+ =======================
930
+
931
+ This hook is only called for :ref:`initial conftests <pluginorder>`.
932
+ """
933
+
934
+
935
+ def pytest_sessionfinish(
936
+ session: Session,
937
+ exitstatus: int | ExitCode,
938
+ ) -> None:
939
+ """Called after whole test run finished, right before returning the exit status to the system.
940
+
941
+ :param session: The pytest session object.
942
+ :param exitstatus: The status which pytest will return to the system.
943
+
944
+ Use in conftest plugins
945
+ =======================
946
+
947
+ Any conftest file can implement this hook.
948
+ """
949
+
950
+
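# Illustrative conftest.py sketch pairing the two session hooks above to
# time the whole run; stashing the start time on the session object is an
# ad-hoc (hypothetical) attribute choice.
import time

import pytest


def pytest_sessionstart(session: pytest.Session) -> None:
    session._run_started = time.monotonic()


def pytest_sessionfinish(session: pytest.Session, exitstatus) -> None:
    duration = time.monotonic() - session._run_started
    print(f"\nwhole run took {duration:.2f}s (exit status {exitstatus})")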
951
+ def pytest_unconfigure(config: Config) -> None:
952
+ """Called before test process is exited.
953
+
954
+ :param config: The pytest config object.
955
+
956
+ Use in conftest plugins
957
+ =======================
958
+
959
+ Any conftest file can implement this hook.
960
+ """
961
+
962
+
963
+ # -------------------------------------------------------------------------
964
+ # hooks for customizing the assert methods
965
+ # -------------------------------------------------------------------------
966
+
967
+
968
+ def pytest_assertrepr_compare(
969
+ config: Config, op: str, left: object, right: object
970
+ ) -> list[str] | None:
971
+ """Return explanation for comparisons in failing assert expressions.
972
+
973
+ Return None for no custom explanation, otherwise return a list
974
+ of strings. The strings will be joined by newlines but any newlines
975
+ *in* a string will be escaped. Note that all but the first line will
976
+ be indented slightly; the intention is for the first line to be a summary.
977
+
978
+ :param config: The pytest config object.
979
+ :param op: The operator, e.g. `"=="`, `"!="`, `"not in"`.
980
+ :param left: The left operand.
981
+ :param right: The right operand.
982
+
983
+ Use in conftest plugins
984
+ =======================
985
+
986
+ Any conftest file can implement this hook. For a given item, only conftest
987
+ files in the item's directory and its parent directories are consulted.
988
+ """
989
+
990
+
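# Illustrative conftest.py sketch in the spirit of the pytest docs: give a
# friendlier explanation when two instances of a hypothetical Foo class
# compare unequal with ==.
class Foo:
    def __init__(self, val):
        self.val = val

    def __eq__(self, other):
        return isinstance(other, Foo) and self.val == other.val


def pytest_assertrepr_compare(config, op, left, right):
    if isinstance(left, Foo) and isinstance(right, Foo) and op == "==":
        return ["Comparing Foo instances:", f"   vals: {left.val} != {right.val}"]
    return None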
991
+ def pytest_assertion_pass(item: Item, lineno: int, orig: str, expl: str) -> None:
992
+ """Called whenever an assertion passes.
993
+
994
+ .. versionadded:: 5.0
995
+
996
+ Use this hook to do some processing after a passing assertion.
997
+ The original assertion information is available in the `orig` string
998
+ and the pytest introspected assertion information is available in the
999
+ `expl` string.
1000
+
1001
+ This hook must be explicitly enabled by the ``enable_assertion_pass_hook``
1002
+ ini-file option:
1003
+
1004
+ .. code-block:: ini
1005
+
1006
+ [pytest]
1007
+ enable_assertion_pass_hook=true
1008
+
1009
+ You need to **clean the .pyc** files in your project directory and interpreter libraries
1010
+ when enabling this option, as assertions will need to be re-written.
1011
+
1012
+ :param item: pytest item object of current test.
1013
+ :param lineno: Line number of the assert statement.
1014
+ :param orig: String with the original assertion.
1015
+ :param expl: String with the assert explanation.
1016
+
1017
+ Use in conftest plugins
1018
+ =======================
1019
+
1020
+ Any conftest file can implement this hook. For a given item, only conftest
1021
+ files in the item's directory and its parent directories are consulted.
1022
+ """
1023
+
1024
+
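# Illustrative conftest.py sketch: log every passing assertion. Remember
# this only fires once enable_assertion_pass_hook is set as shown above.
def pytest_assertion_pass(item, lineno, orig, expl):
    print(f"{item.nodeid}:{lineno}: assertion passed: {orig} -> {expl}")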
1025
+ # -------------------------------------------------------------------------
1026
+ # Hooks for influencing reporting (invoked from _pytest_terminal).
1027
+ # -------------------------------------------------------------------------
1028
+
1029
+
1030
+ @hookspec(
1031
+ warn_on_impl_args={
1032
+ "startdir": HOOK_LEGACY_PATH_ARG.format(
1033
+ pylib_path_arg="startdir", pathlib_path_arg="start_path"
1034
+ ),
1035
+ },
1036
+ )
1037
+ def pytest_report_header( # type:ignore[empty-body]
1038
+ config: Config, start_path: Path, startdir: LEGACY_PATH
1039
+ ) -> str | list[str]:
1040
+ """Return a string or list of strings to be displayed as header info for terminal reporting.
1041
+
1042
+ :param config: The pytest config object.
1043
+ :param start_path: The starting dir.
1044
+ :type start_path: pathlib.Path
1045
+ :param startdir: The starting dir (deprecated).
1046
+
1047
+ .. note::
1048
+
1049
+ Lines returned by a plugin are displayed before those of plugins which
1050
+ ran before it.
1051
+ If you want to have your line(s) displayed first, use
1052
+ :ref:`trylast=True <plugin-hookorder>`.
1053
+
1054
+ .. versionchanged:: 7.0.0
1055
+ The ``start_path`` parameter was added as a :class:`pathlib.Path`
1056
+ equivalent of the ``startdir`` parameter. The ``startdir`` parameter
1057
+ has been deprecated.
1058
+
1059
+ Use in conftest plugins
1060
+ =======================
1061
+
1062
+ This hook is only called for :ref:`initial conftests <pluginorder>`.
1063
+ """
1064
+
1065
+
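# Illustrative conftest.py sketch: add extra header lines (the second
# line is arbitrary example text).
def pytest_report_header(config):
    return [f"rootdir (pathlib): {config.rootpath}", "project: example"]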
1066
+ @hookspec(
1067
+ warn_on_impl_args={
1068
+ "startdir": HOOK_LEGACY_PATH_ARG.format(
1069
+ pylib_path_arg="startdir", pathlib_path_arg="start_path"
1070
+ ),
1071
+ },
1072
+ )
1073
+ def pytest_report_collectionfinish( # type:ignore[empty-body]
1074
+ config: Config,
1075
+ start_path: Path,
1076
+ startdir: LEGACY_PATH,
1077
+ items: Sequence[Item],
1078
+ ) -> str | list[str]:
1079
+ """Return a string or list of strings to be displayed after collection
1080
+ has finished successfully.
1081
+
1082
+ These strings will be displayed after the standard "collected X items" message.
1083
+
1084
+ .. versionadded:: 3.2
1085
+
1086
+ :param config: The pytest config object.
1087
+ :param start_path: The starting dir.
1088
+ :type start_path: pathlib.Path
1089
+ :param startdir: The starting dir (deprecated).
1090
+ :param items: List of pytest items that are going to be executed; this list should not be modified.
1091
+
1092
+ .. note::
1093
+
1094
+ Lines returned by a plugin are displayed before those of plugins which
1095
+ ran before it.
1096
+ If you want to have your line(s) displayed first, use
1097
+ :ref:`trylast=True <plugin-hookorder>`.
1098
+
1099
+ .. versionchanged:: 7.0.0
1100
+ The ``start_path`` parameter was added as a :class:`pathlib.Path`
1101
+ equivalent of the ``startdir`` parameter. The ``startdir`` parameter
1102
+ has been deprecated.
1103
+
1104
+ Use in conftest plugins
1105
+ =======================
1106
+
1107
+ Any conftest plugin can implement this hook.
1108
+ """
1109
+
1110
+
1111
+ @hookspec(firstresult=True)
1112
+ def pytest_report_teststatus( # type:ignore[empty-body]
1113
+ report: CollectReport | TestReport, config: Config
1114
+ ) -> TestShortLogReport | tuple[str, str, str | tuple[str, Mapping[str, bool]]]:
1115
+ """Return result-category, shortletter and verbose word for status
1116
+ reporting.
1117
+
1118
+ The result-category is a category in which to count the result, for
1119
+ example "passed", "skipped", "error" or the empty string.
1120
+
1121
+ The shortletter is shown as testing progresses, for example ".", "s",
1122
+ "E" or the empty string.
1123
+
1124
+ The verbose word is shown as testing progresses in verbose mode, for
1125
+ example "PASSED", "SKIPPED", "ERROR" or the empty string.
1126
+
1127
+ pytest may style these implicitly according to the report outcome.
1128
+ To provide explicit styling, return a tuple for the verbose word,
1129
+ for example ``"rerun", "R", ("RERUN", {"yellow": True})``.
1130
+
1131
+ :param report: The report object whose status is to be returned.
1132
+ :param config: The pytest config object.
1133
+ :returns: The test status.
1134
+
1135
+ Stops at first non-None result, see :ref:`firstresult`.
1136
+
1137
+ Use in conftest plugins
1138
+ =======================
1139
+
1140
+ Any conftest plugin can implement this hook.
1141
+ """
1142
+
1143
+
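# Illustrative conftest.py sketch mirroring the docstring's example: give
# a custom category/letter/word (with explicit styling) for reports that a
# hypothetical rerun plugin has flagged.
def pytest_report_teststatus(report, config):
    if report.when == "call" and getattr(report, "rerun", False):
        return "rerun", "R", ("RERUN", {"yellow": True})
    return None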
1144
+ def pytest_terminal_summary(
1145
+ terminalreporter: TerminalReporter,
1146
+ exitstatus: ExitCode,
1147
+ config: Config,
1148
+ ) -> None:
1149
+ """Add a section to terminal summary reporting.
1150
+
1151
+ :param terminalreporter: The internal terminal reporter object.
1152
+ :param exitstatus: The exit status that will be reported back to the OS.
1153
+ :param config: The pytest config object.
1154
+
1155
+ .. versionadded:: 4.2
1156
+ The ``config`` parameter.
1157
+
1158
+ Use in conftest plugins
1159
+ =======================
1160
+
1161
+ Any conftest plugin can implement this hook.
1162
+ """
1163
+
1164
+
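# Illustrative conftest.py sketch: append a custom section using existing
# TerminalReporter methods (write_sep/write_line).
def pytest_terminal_summary(terminalreporter, exitstatus, config):
    terminalreporter.write_sep("-", "my summary")
    terminalreporter.write_line(f"finished with exit status {exitstatus!r}")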
1165
+ @hookspec(historic=True)
1166
+ def pytest_warning_recorded(
1167
+ warning_message: warnings.WarningMessage,
1168
+ when: Literal["config", "collect", "runtest"],
1169
+ nodeid: str,
1170
+ location: tuple[str, int, str] | None,
1171
+ ) -> None:
1172
+ """Process a warning captured by the internal pytest warnings plugin.
1173
+
1174
+ :param warning_message:
1175
+ The captured warning. This is the same object produced by :class:`warnings.catch_warnings`,
1176
+ and contains the same attributes as the parameters of :py:func:`warnings.showwarning`.
1177
+
1178
+ :param when:
1179
+ Indicates when the warning was captured. Possible values:
1180
+
1181
+ * ``"config"``: during pytest configuration/initialization stage.
1182
+ * ``"collect"``: during test collection.
1183
+ * ``"runtest"``: during test execution.
1184
+
1185
+ :param nodeid:
1186
+ Full id of the item. Empty string for warnings that are not specific to
1187
+ a particular node.
1188
+
1189
+ :param location:
1190
+ When available, holds information about the execution context of the captured
1191
+ warning (filename, linenumber, function). ``function`` evaluates to <module>
1192
+ when the execution context is at the module level.
1193
+
1194
+ .. versionadded:: 6.0
1195
+
1196
+ Use in conftest plugins
1197
+ =======================
1198
+
1199
+ Any conftest file can implement this hook. If the warning is specific to a
1200
+ particular node, only conftest files in parent directories of the node are
1201
+ consulted.
1202
+ """
1203
+
1204
+
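# Illustrative conftest.py sketch: collect captured warnings into a
# module-level list (name is arbitrary) for later inspection.
captured_warnings = []


def pytest_warning_recorded(warning_message, when, nodeid, location):
    captured_warnings.append((when, nodeid, str(warning_message.message)))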
1205
+ # -------------------------------------------------------------------------
1206
+ # Hooks for influencing skipping
1207
+ # -------------------------------------------------------------------------
1208
+
1209
+
1210
+ def pytest_markeval_namespace( # type:ignore[empty-body]
1211
+ config: Config,
1212
+ ) -> dict[str, Any]:
1213
+ """Called when constructing the globals dictionary used for
1214
+ evaluating string conditions in xfail/skipif markers.
1215
+
1216
+ This is useful when the condition for a marker requires
1217
+ objects that are expensive or impossible to obtain at
+ collection time, which is when normal boolean
+ conditions are evaluated.
1220
+
1221
+ .. versionadded:: 6.2
1222
+
1223
+ :param config: The pytest config object.
1224
+ :returns: A dictionary of additional globals to add.
1225
+
1226
+ Use in conftest plugins
1227
+ =======================
1228
+
1229
+ Any conftest file can implement this hook. For a given item, only conftest
1230
+ files in parent directories of the item are consulted.
1231
+ """
1232
+
1233
+
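# Illustrative conftest.py sketch: expose an extra name to skipif/xfail
# string conditions, e.g. @pytest.mark.skipif("not database_available").
# The probe below is a hypothetical stand-in for an expensive check.
def _probe_database() -> bool:
    return False


def pytest_markeval_namespace(config):
    return {"database_available": _probe_database()}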
1234
+ # -------------------------------------------------------------------------
1235
+ # error handling and internal debugging hooks
1236
+ # -------------------------------------------------------------------------
1237
+
1238
+
1239
+ def pytest_internalerror(
1240
+ excrepr: ExceptionRepr,
1241
+ excinfo: ExceptionInfo[BaseException],
1242
+ ) -> bool | None:
1243
+ """Called for internal errors.
1244
+
1245
+ Return True to suppress the fallback handling of printing an
1246
+ INTERNALERROR message directly to sys.stderr.
1247
+
1248
+ :param excrepr: The exception repr object.
1249
+ :param excinfo: The exception info.
1250
+
1251
+ Use in conftest plugins
1252
+ =======================
1253
+
1254
+ Any conftest plugin can implement this hook.
1255
+ """
1256
+
1257
+
1258
+ def pytest_keyboard_interrupt(
1259
+ excinfo: ExceptionInfo[KeyboardInterrupt | Exit],
1260
+ ) -> None:
1261
+ """Called for keyboard interrupt.
1262
+
1263
+ :param excinfo: The exception info.
1264
+
1265
+ Use in conftest plugins
1266
+ =======================
1267
+
1268
+ Any conftest plugin can implement this hook.
1269
+ """
1270
+
1271
+
1272
+ def pytest_exception_interact(
1273
+ node: Item | Collector,
1274
+ call: CallInfo[Any],
1275
+ report: CollectReport | TestReport,
1276
+ ) -> None:
1277
+ """Called when an exception was raised which can potentially be
1278
+ interactively handled.
1279
+
1280
+ May be called during collection (see :hook:`pytest_make_collect_report`),
1281
+ in which case ``report`` is a :class:`~pytest.CollectReport`.
1282
+
1283
+ May be called during runtest of an item (see :hook:`pytest_runtest_protocol`),
1284
+ in which case ``report`` is a :class:`~pytest.TestReport`.
1285
+
1286
+ This hook is not called if the exception that was raised is an internal
1287
+ exception like ``skip.Exception``.
1288
+
1289
+ :param node:
1290
+ The item or collector.
1291
+ :param call:
1292
+ The call information. Contains the exception.
1293
+ :param report:
1294
+ The collection or test report.
1295
+
1296
+ Use in conftest plugins
1297
+ =======================
1298
+
1299
+ Any conftest file can implement this hook. For a given node, only conftest
1300
+ files in parent directories of the node are consulted.
1301
+ """
1302
+
1303
+
1304
+ def pytest_enter_pdb(config: Config, pdb: pdb.Pdb) -> None:
1305
+ """Called upon pdb.set_trace().
1306
+
1307
+ Can be used by plugins to take special action just before the python
1308
+ debugger enters interactive mode.
1309
+
1310
+ :param config: The pytest config object.
1311
+ :param pdb: The Pdb instance.
1312
+
1313
+ Use in conftest plugins
1314
+ =======================
1315
+
1316
+ Any conftest plugin can implement this hook.
1317
+ """
1318
+
1319
+
1320
+ def pytest_leave_pdb(config: Config, pdb: pdb.Pdb) -> None:
1321
+ """Called when leaving pdb (e.g. with continue after pdb.set_trace()).
1322
+
1323
+ Can be used by plugins to take special action just after the python
1324
+ debugger leaves interactive mode.
1325
+
1326
+ :param config: The pytest config object.
1327
+ :param pdb: The Pdb instance.
1328
+
1329
+ Use in conftest plugins
1330
+ =======================
1331
+
1332
+ Any conftest plugin can implement this hook.
1333
+ """
.venv/lib/python3.11/site-packages/_pytest/legacypath.py ADDED
@@ -0,0 +1,468 @@
1
+ # mypy: allow-untyped-defs
2
+ """Add backward compatibility support for the legacy py path type."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import dataclasses
7
+ from pathlib import Path
8
+ import shlex
9
+ import subprocess
10
+ from typing import Final
11
+ from typing import final
12
+ from typing import TYPE_CHECKING
13
+
14
+ from iniconfig import SectionWrapper
15
+
16
+ from _pytest.cacheprovider import Cache
17
+ from _pytest.compat import LEGACY_PATH
18
+ from _pytest.compat import legacy_path
19
+ from _pytest.config import Config
20
+ from _pytest.config import hookimpl
21
+ from _pytest.config import PytestPluginManager
22
+ from _pytest.deprecated import check_ispytest
23
+ from _pytest.fixtures import fixture
24
+ from _pytest.fixtures import FixtureRequest
25
+ from _pytest.main import Session
26
+ from _pytest.monkeypatch import MonkeyPatch
27
+ from _pytest.nodes import Collector
28
+ from _pytest.nodes import Item
29
+ from _pytest.nodes import Node
30
+ from _pytest.pytester import HookRecorder
31
+ from _pytest.pytester import Pytester
32
+ from _pytest.pytester import RunResult
33
+ from _pytest.terminal import TerminalReporter
34
+ from _pytest.tmpdir import TempPathFactory
35
+
36
+
37
+ if TYPE_CHECKING:
38
+ import pexpect
39
+
40
+
41
+ @final
42
+ class Testdir:
43
+ """
44
+ Similar to :class:`Pytester`, but this class works with legacy ``py.path.local`` objects instead.
45
+
46
+ All methods just forward to an internal :class:`Pytester` instance, converting results
47
+ to `legacy_path` objects as necessary.
48
+ """
49
+
50
+ __test__ = False
51
+
52
+ CLOSE_STDIN: Final = Pytester.CLOSE_STDIN
53
+ TimeoutExpired: Final = Pytester.TimeoutExpired
54
+
55
+ def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None:
56
+ check_ispytest(_ispytest)
57
+ self._pytester = pytester
58
+
59
+ @property
60
+ def tmpdir(self) -> LEGACY_PATH:
61
+ """Temporary directory where tests are executed."""
62
+ return legacy_path(self._pytester.path)
63
+
64
+ @property
65
+ def test_tmproot(self) -> LEGACY_PATH:
66
+ return legacy_path(self._pytester._test_tmproot)
67
+
68
+ @property
69
+ def request(self):
70
+ return self._pytester._request
71
+
72
+ @property
73
+ def plugins(self):
74
+ return self._pytester.plugins
75
+
76
+ @plugins.setter
77
+ def plugins(self, plugins):
78
+ self._pytester.plugins = plugins
79
+
80
+ @property
81
+ def monkeypatch(self) -> MonkeyPatch:
82
+ return self._pytester._monkeypatch
83
+
84
+ def make_hook_recorder(self, pluginmanager) -> HookRecorder:
85
+ """See :meth:`Pytester.make_hook_recorder`."""
86
+ return self._pytester.make_hook_recorder(pluginmanager)
87
+
88
+ def chdir(self) -> None:
89
+ """See :meth:`Pytester.chdir`."""
90
+ return self._pytester.chdir()
91
+
92
+ def finalize(self) -> None:
93
+ return self._pytester._finalize()
94
+
95
+ def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH:
96
+ """See :meth:`Pytester.makefile`."""
97
+ if ext and not ext.startswith("."):
98
+ # pytester.makefile is going to throw a ValueError in a way that
99
+ # testdir.makefile did not, because
100
+ # pathlib.Path is stricter about suffixes than py.path.
+ # This ext argument is likely user error, but since testdir has
102
+ # allowed this, we will prepend "." as a workaround to avoid breaking
103
+ # testdir usage that worked before
104
+ ext = "." + ext
105
+ return legacy_path(self._pytester.makefile(ext, *args, **kwargs))
106
+
107
+ def makeconftest(self, source) -> LEGACY_PATH:
108
+ """See :meth:`Pytester.makeconftest`."""
109
+ return legacy_path(self._pytester.makeconftest(source))
110
+
111
+ def makeini(self, source) -> LEGACY_PATH:
112
+ """See :meth:`Pytester.makeini`."""
113
+ return legacy_path(self._pytester.makeini(source))
114
+
115
+ def getinicfg(self, source: str) -> SectionWrapper:
116
+ """See :meth:`Pytester.getinicfg`."""
117
+ return self._pytester.getinicfg(source)
118
+
119
+ def makepyprojecttoml(self, source) -> LEGACY_PATH:
120
+ """See :meth:`Pytester.makepyprojecttoml`."""
121
+ return legacy_path(self._pytester.makepyprojecttoml(source))
122
+
123
+ def makepyfile(self, *args, **kwargs) -> LEGACY_PATH:
124
+ """See :meth:`Pytester.makepyfile`."""
125
+ return legacy_path(self._pytester.makepyfile(*args, **kwargs))
126
+
127
+ def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH:
128
+ """See :meth:`Pytester.maketxtfile`."""
129
+ return legacy_path(self._pytester.maketxtfile(*args, **kwargs))
130
+
131
+ def syspathinsert(self, path=None) -> None:
132
+ """See :meth:`Pytester.syspathinsert`."""
133
+ return self._pytester.syspathinsert(path)
134
+
135
+ def mkdir(self, name) -> LEGACY_PATH:
136
+ """See :meth:`Pytester.mkdir`."""
137
+ return legacy_path(self._pytester.mkdir(name))
138
+
139
+ def mkpydir(self, name) -> LEGACY_PATH:
140
+ """See :meth:`Pytester.mkpydir`."""
141
+ return legacy_path(self._pytester.mkpydir(name))
142
+
143
+ def copy_example(self, name=None) -> LEGACY_PATH:
144
+ """See :meth:`Pytester.copy_example`."""
145
+ return legacy_path(self._pytester.copy_example(name))
146
+
147
+ def getnode(self, config: Config, arg) -> Item | Collector | None:
148
+ """See :meth:`Pytester.getnode`."""
149
+ return self._pytester.getnode(config, arg)
150
+
151
+ def getpathnode(self, path):
152
+ """See :meth:`Pytester.getpathnode`."""
153
+ return self._pytester.getpathnode(path)
154
+
155
+ def genitems(self, colitems: list[Item | Collector]) -> list[Item]:
156
+ """See :meth:`Pytester.genitems`."""
157
+ return self._pytester.genitems(colitems)
158
+
159
+ def runitem(self, source):
160
+ """See :meth:`Pytester.runitem`."""
161
+ return self._pytester.runitem(source)
162
+
163
+ def inline_runsource(self, source, *cmdlineargs):
164
+ """See :meth:`Pytester.inline_runsource`."""
165
+ return self._pytester.inline_runsource(source, *cmdlineargs)
166
+
167
+ def inline_genitems(self, *args):
168
+ """See :meth:`Pytester.inline_genitems`."""
169
+ return self._pytester.inline_genitems(*args)
170
+
171
+ def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False):
172
+ """See :meth:`Pytester.inline_run`."""
173
+ return self._pytester.inline_run(
174
+ *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc
175
+ )
176
+
177
+ def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
178
+ """See :meth:`Pytester.runpytest_inprocess`."""
179
+ return self._pytester.runpytest_inprocess(*args, **kwargs)
180
+
181
+ def runpytest(self, *args, **kwargs) -> RunResult:
182
+ """See :meth:`Pytester.runpytest`."""
183
+ return self._pytester.runpytest(*args, **kwargs)
184
+
185
+ def parseconfig(self, *args) -> Config:
186
+ """See :meth:`Pytester.parseconfig`."""
187
+ return self._pytester.parseconfig(*args)
188
+
189
+ def parseconfigure(self, *args) -> Config:
190
+ """See :meth:`Pytester.parseconfigure`."""
191
+ return self._pytester.parseconfigure(*args)
192
+
193
+ def getitem(self, source, funcname="test_func"):
194
+ """See :meth:`Pytester.getitem`."""
195
+ return self._pytester.getitem(source, funcname)
196
+
197
+ def getitems(self, source):
198
+ """See :meth:`Pytester.getitems`."""
199
+ return self._pytester.getitems(source)
200
+
201
+ def getmodulecol(self, source, configargs=(), withinit=False):
202
+ """See :meth:`Pytester.getmodulecol`."""
203
+ return self._pytester.getmodulecol(
204
+ source, configargs=configargs, withinit=withinit
205
+ )
206
+
207
+ def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None:
208
+ """See :meth:`Pytester.collect_by_name`."""
209
+ return self._pytester.collect_by_name(modcol, name)
210
+
211
+ def popen(
212
+ self,
213
+ cmdargs,
214
+ stdout=subprocess.PIPE,
215
+ stderr=subprocess.PIPE,
216
+ stdin=CLOSE_STDIN,
217
+ **kw,
218
+ ):
219
+ """See :meth:`Pytester.popen`."""
220
+ return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw)
221
+
222
+ def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult:
223
+ """See :meth:`Pytester.run`."""
224
+ return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin)
225
+
226
+ def runpython(self, script) -> RunResult:
227
+ """See :meth:`Pytester.runpython`."""
228
+ return self._pytester.runpython(script)
229
+
230
+ def runpython_c(self, command):
231
+ """See :meth:`Pytester.runpython_c`."""
232
+ return self._pytester.runpython_c(command)
233
+
234
+ def runpytest_subprocess(self, *args, timeout=None) -> RunResult:
235
+ """See :meth:`Pytester.runpytest_subprocess`."""
236
+ return self._pytester.runpytest_subprocess(*args, timeout=timeout)
237
+
238
+ def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn:
239
+ """See :meth:`Pytester.spawn_pytest`."""
240
+ return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout)
241
+
242
+ def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn:
243
+ """See :meth:`Pytester.spawn`."""
244
+ return self._pytester.spawn(cmd, expect_timeout=expect_timeout)
245
+
246
+ def __repr__(self) -> str:
247
+ return f"<Testdir {self.tmpdir!r}>"
248
+
249
+ def __str__(self) -> str:
250
+ return str(self.tmpdir)
251
+
252
+
253
+ class LegacyTestdirPlugin:
254
+ @staticmethod
255
+ @fixture
256
+ def testdir(pytester: Pytester) -> Testdir:
257
+ """
258
+ Like :fixture:`pytester`, but provides an instance whose methods return
259
+ legacy ``LEGACY_PATH`` objects instead when applicable.
260
+
261
+ New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`.
262
+ """
263
+ return Testdir(pytester, _ispytest=True)
264
+
265
+
266
+ @final
267
+ @dataclasses.dataclass
268
+ class TempdirFactory:
269
+ """Backward compatibility wrapper that implements ``py.path.local``
270
+ for :class:`TempPathFactory`.
271
+
272
+ .. note::
273
+ These days, it is preferred to use ``tmp_path_factory``.
274
+
275
+ :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
276
+
277
+ """
278
+
279
+ _tmppath_factory: TempPathFactory
280
+
281
+ def __init__(
282
+ self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False
283
+ ) -> None:
284
+ check_ispytest(_ispytest)
285
+ self._tmppath_factory = tmppath_factory
286
+
287
+ def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH:
288
+ """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object."""
289
+ return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())
290
+
291
+ def getbasetemp(self) -> LEGACY_PATH:
292
+ """Same as :meth:`TempPathFactory.getbasetemp`, but returns a ``py.path.local`` object."""
293
+ return legacy_path(self._tmppath_factory.getbasetemp().resolve())
294
+
295
+
296
+ class LegacyTmpdirPlugin:
297
+ @staticmethod
298
+ @fixture(scope="session")
299
+ def tmpdir_factory(request: FixtureRequest) -> TempdirFactory:
300
+ """Return a :class:`pytest.TempdirFactory` instance for the test session."""
301
+ # Set dynamically by pytest_configure().
302
+ return request.config._tmpdirhandler # type: ignore
303
+
304
+ @staticmethod
305
+ @fixture
306
+ def tmpdir(tmp_path: Path) -> LEGACY_PATH:
307
+ """Return a temporary directory (as `legacy_path`_ object)
308
+ which is unique to each test function invocation.
309
+ The temporary directory is created as a subdirectory
310
+ of the base temporary directory, with configurable retention,
311
+ as discussed in :ref:`temporary directory location and retention`.
312
+
313
+ .. note::
314
+ These days, it is preferred to use ``tmp_path``.
315
+
316
+ :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
317
+
318
+ .. _legacy_path: https://py.readthedocs.io/en/latest/path.html
319
+ """
320
+ return legacy_path(tmp_path)
321
+
322
+
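# Illustrative usage sketch: the legacy tmpdir fixture yields a
# py.path.local, so tests use its join/write/read API rather than pathlib.
def test_write_file(tmpdir):
    p = tmpdir.join("hello.txt")
    p.write("content")
    assert p.read() == "content"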
323
+ def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH:
324
+ """Return a directory path object with the given name.
325
+
326
+ Same as :func:`mkdir`, but returns a legacy py path instance.
327
+ """
328
+ return legacy_path(self.mkdir(name))
329
+
330
+
331
+ def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH:
332
+ """(deprecated) The file system path of the test module which collected this test."""
333
+ return legacy_path(self.path)
334
+
335
+
336
+ def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH:
337
+ """The directory from which pytest was invoked.
338
+
339
+ Prefer to use ``startpath`` which is a :class:`pathlib.Path`.
340
+
341
+ :type: LEGACY_PATH
342
+ """
343
+ return legacy_path(self.startpath)
344
+
345
+
346
+ def Config_invocation_dir(self: Config) -> LEGACY_PATH:
347
+ """The directory from which pytest was invoked.
348
+
349
+ Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`,
350
+ which is a :class:`pathlib.Path`.
351
+
352
+ :type: LEGACY_PATH
353
+ """
354
+ return legacy_path(str(self.invocation_params.dir))
355
+
356
+
357
+ def Config_rootdir(self: Config) -> LEGACY_PATH:
358
+ """The path to the :ref:`rootdir <rootdir>`.
359
+
360
+ Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`.
361
+
362
+ :type: LEGACY_PATH
363
+ """
364
+ return legacy_path(str(self.rootpath))
365
+
366
+
367
+ def Config_inifile(self: Config) -> LEGACY_PATH | None:
368
+ """The path to the :ref:`configfile <configfiles>`.
369
+
370
+ Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`.
371
+
372
+ :type: Optional[LEGACY_PATH]
373
+ """
374
+ return legacy_path(str(self.inipath)) if self.inipath else None
375
+
376
+
377
+ def Session_startdir(self: Session) -> LEGACY_PATH:
378
+ """The path from which pytest was invoked.
379
+
380
+ Prefer to use ``startpath`` which is a :class:`pathlib.Path`.
381
+
382
+ :type: LEGACY_PATH
383
+ """
384
+ return legacy_path(self.startpath)
385
+
386
+
387
+ def Config__getini_unknown_type(self, name: str, type: str, value: str | list[str]):
388
+ if type == "pathlist":
389
+ # TODO: This assert is probably not valid in all cases.
390
+ assert self.inipath is not None
391
+ dp = self.inipath.parent
392
+ input_values = shlex.split(value) if isinstance(value, str) else value
393
+ return [legacy_path(str(dp / x)) for x in input_values]
394
+ else:
395
+ raise ValueError(f"unknown configuration type: {type}", value)
396
+
397
+
398
+ def Node_fspath(self: Node) -> LEGACY_PATH:
399
+ """(deprecated) returns a legacy_path copy of self.path"""
400
+ return legacy_path(self.path)
401
+
402
+
403
+ def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None:
404
+ self.path = Path(value)
405
+
406
+
407
+ @hookimpl(tryfirst=True)
408
+ def pytest_load_initial_conftests(early_config: Config) -> None:
409
+ """Monkeypatch legacy path attributes in several classes, as early as possible."""
410
+ mp = MonkeyPatch()
411
+ early_config.add_cleanup(mp.undo)
412
+
413
+ # Add Cache.makedir().
414
+ mp.setattr(Cache, "makedir", Cache_makedir, raising=False)
415
+
416
+ # Add FixtureRequest.fspath property.
417
+ mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False)
418
+
419
+ # Add TerminalReporter.startdir property.
420
+ mp.setattr(
421
+ TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False
422
+ )
423
+
424
+ # Add Config.{invocation_dir,rootdir,inifile} properties.
425
+ mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False)
426
+ mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False)
427
+ mp.setattr(Config, "inifile", property(Config_inifile), raising=False)
428
+
429
+ # Add Session.startdir property.
430
+ mp.setattr(Session, "startdir", property(Session_startdir), raising=False)
431
+
432
+ # Add pathlist configuration type.
433
+ mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type)
434
+
435
+ # Add Node.fspath property.
436
+ mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False)
437
+
438
+
439
+ @hookimpl
440
+ def pytest_configure(config: Config) -> None:
441
+ """Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed."""
442
+ if config.pluginmanager.has_plugin("tmpdir"):
443
+ mp = MonkeyPatch()
444
+ config.add_cleanup(mp.undo)
445
+ # Create TmpdirFactory and attach it to the config object.
446
+ #
447
+ # This is to comply with existing plugins which expect the handler to be
448
+ # available at pytest_configure time, but ideally should be moved entirely
449
+ # to the tmpdir_factory session fixture.
450
+ try:
451
+ tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined]
452
+ except AttributeError:
453
+ # tmpdir plugin is blocked.
454
+ pass
455
+ else:
456
+ _tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True)
457
+ mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False)
458
+
459
+ config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir")
460
+
461
+
462
+ @hookimpl
463
+ def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None:
464
+ # pytester is not loaded by default and is commonly loaded from a conftest,
465
+ # so checking for it in `pytest_configure` is not enough.
466
+ is_pytester = plugin is manager.get_plugin("pytester")
467
+ if is_pytester and not manager.is_registered(LegacyTestdirPlugin):
468
+ manager.register(LegacyTestdirPlugin, "legacypath-pytester")
.venv/lib/python3.11/site-packages/_pytest/logging.py ADDED
@@ -0,0 +1,955 @@
1
+ # mypy: allow-untyped-defs
2
+ """Access and control log capturing."""
3
+
4
+ from __future__ import annotations
5
+
6
+ from contextlib import contextmanager
7
+ from contextlib import nullcontext
8
+ from datetime import datetime
9
+ from datetime import timedelta
10
+ from datetime import timezone
11
+ import io
12
+ from io import StringIO
13
+ import logging
14
+ from logging import LogRecord
15
+ import os
16
+ from pathlib import Path
17
+ import re
18
+ from types import TracebackType
19
+ from typing import AbstractSet
20
+ from typing import Dict
21
+ from typing import final
22
+ from typing import Generator
23
+ from typing import Generic
24
+ from typing import List
25
+ from typing import Literal
26
+ from typing import Mapping
27
+ from typing import TYPE_CHECKING
28
+ from typing import TypeVar
29
+
30
+ from _pytest import nodes
31
+ from _pytest._io import TerminalWriter
32
+ from _pytest.capture import CaptureManager
33
+ from _pytest.config import _strtobool
34
+ from _pytest.config import Config
35
+ from _pytest.config import create_terminal_writer
36
+ from _pytest.config import hookimpl
37
+ from _pytest.config import UsageError
38
+ from _pytest.config.argparsing import Parser
39
+ from _pytest.deprecated import check_ispytest
40
+ from _pytest.fixtures import fixture
41
+ from _pytest.fixtures import FixtureRequest
42
+ from _pytest.main import Session
43
+ from _pytest.stash import StashKey
44
+ from _pytest.terminal import TerminalReporter
45
+
46
+
47
+ if TYPE_CHECKING:
48
+ logging_StreamHandler = logging.StreamHandler[StringIO]
49
+ else:
50
+ logging_StreamHandler = logging.StreamHandler
51
+
52
+ DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s"
53
+ DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
54
+ _ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m")
55
+ caplog_handler_key = StashKey["LogCaptureHandler"]()
56
+ caplog_records_key = StashKey[Dict[str, List[logging.LogRecord]]]()
57
+
58
+
59
+ def _remove_ansi_escape_sequences(text: str) -> str:
60
+ return _ANSI_ESCAPE_SEQ.sub("", text)
61
+
62
+
63
+ class DatetimeFormatter(logging.Formatter):
64
+ """A logging formatter which formats record with
65
+ :func:`datetime.datetime.strftime` formatter instead of
66
+ :func:`time.strftime` in case of microseconds in format string.
67
+ """
68
+
69
+ def formatTime(self, record: LogRecord, datefmt: str | None = None) -> str:
70
+ if datefmt and "%f" in datefmt:
71
+ ct = self.converter(record.created)
72
+ tz = timezone(timedelta(seconds=ct.tm_gmtoff), ct.tm_zone)
73
+ # Construct `datetime.datetime` object from `struct_time`
74
+ # and msecs information from `record`
75
+ # Using int() instead of round() to avoid it exceeding 1_000_000 and causing a ValueError (#11861).
76
+ dt = datetime(*ct[0:6], microsecond=int(record.msecs * 1000), tzinfo=tz)
77
+ return dt.strftime(datefmt)
78
+ # Use `logging.Formatter` for non-microsecond formats
79
+ return super().formatTime(record, datefmt)
80
+
81
+
82
+ class ColoredLevelFormatter(DatetimeFormatter):
83
+ """A logging formatter which colorizes the %(levelname)..s part of the
84
+ log format passed to __init__."""
85
+
86
+ LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = {
87
+ logging.CRITICAL: {"red"},
88
+ logging.ERROR: {"red", "bold"},
89
+ logging.WARNING: {"yellow"},
90
+ logging.WARN: {"yellow"},
91
+ logging.INFO: {"green"},
92
+ logging.DEBUG: {"purple"},
93
+ logging.NOTSET: set(),
94
+ }
95
+ LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*(?:\.\d+)?s)")
96
+
97
+ def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None:
98
+ super().__init__(*args, **kwargs)
99
+ self._terminalwriter = terminalwriter
100
+ self._original_fmt = self._style._fmt
101
+ self._level_to_fmt_mapping: dict[int, str] = {}
102
+
103
+ for level, color_opts in self.LOGLEVEL_COLOROPTS.items():
104
+ self.add_color_level(level, *color_opts)
105
+
106
+ def add_color_level(self, level: int, *color_opts: str) -> None:
107
+ """Add or update color opts for a log level.
108
+
109
+ :param level:
110
+ Log level to apply a style to, e.g. ``logging.INFO``.
111
+ :param color_opts:
112
+ ANSI escape sequence color options. Capitalized colors indicate
+ a background color, e.g. ``'green', 'Yellow', 'bold'`` will give bold
+ green text on a yellow background.
115
+
116
+ .. warning::
117
+ This is an experimental API.
118
+ """
119
+ assert self._fmt is not None
120
+ levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt)
121
+ if not levelname_fmt_match:
122
+ return
123
+ levelname_fmt = levelname_fmt_match.group()
124
+
125
+ formatted_levelname = levelname_fmt % {"levelname": logging.getLevelName(level)}
126
+
127
+ # add ANSI escape sequences around the formatted levelname
128
+ color_kwargs = {name: True for name in color_opts}
129
+ colorized_formatted_levelname = self._terminalwriter.markup(
130
+ formatted_levelname, **color_kwargs
131
+ )
132
+ self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(
133
+ colorized_formatted_levelname, self._fmt
134
+ )
135
+
136
+ def format(self, record: logging.LogRecord) -> str:
137
+ fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt)
138
+ self._style._fmt = fmt
139
+ return super().format(record)
140
+
141
+
142
+ class PercentStyleMultiline(logging.PercentStyle):
143
+ """A logging style with special support for multiline messages.
144
+
145
+ If the message of a record consists of multiple lines, this style
146
+ formats the message as if each line were logged separately.
147
+ """
148
+
149
+ def __init__(self, fmt: str, auto_indent: int | str | bool | None) -> None:
150
+ super().__init__(fmt)
151
+ self._auto_indent = self._get_auto_indent(auto_indent)
152
+
153
+ @staticmethod
154
+ def _get_auto_indent(auto_indent_option: int | str | bool | None) -> int:
155
+ """Determine the current auto indentation setting.
156
+
157
+ Specify auto indent behavior (on/off/fixed) by passing in
158
+ extra={"auto_indent": [value]} to the call to logging.log() or
159
+ using a --log-auto-indent [value] command line option or the
160
+ log_auto_indent [value] config option.
161
+
162
+ Default behavior is auto-indent off.
163
+
164
+ Using the string "True" or "on" or the boolean True as the value
165
+ turns auto indent on, using the string "False" or "off" or the
166
+ boolean False or the int 0 turns it off, and specifying a
167
+ positive integer fixes the indentation position to the value
168
+ specified.
169
+
170
+ Any other values for the option are invalid, and will silently be
171
+ converted to the default.
172
+
173
+ :param None|bool|int|str auto_indent_option:
174
+ User specified option for indentation from command line, config
175
+ or extra kwarg. Accepts int, bool or str. str option accepts the
176
+ same range of values as boolean config options, as well as
177
+ positive integers represented in str form.
178
+
179
+ :returns:
180
+ Indentation value, which can be
181
+ -1 (automatically determine indentation) or
182
+ 0 (auto-indent turned off) or
183
+ >0 (explicitly set indentation position).
184
+ """
185
+ if auto_indent_option is None:
186
+ return 0
187
+ elif isinstance(auto_indent_option, bool):
188
+ if auto_indent_option:
189
+ return -1
190
+ else:
191
+ return 0
192
+ elif isinstance(auto_indent_option, int):
193
+ return int(auto_indent_option)
194
+ elif isinstance(auto_indent_option, str):
195
+ try:
196
+ return int(auto_indent_option)
197
+ except ValueError:
198
+ pass
199
+ try:
200
+ if _strtobool(auto_indent_option):
201
+ return -1
202
+ except ValueError:
203
+ return 0
204
+
205
+ return 0
206
+
207
+ def format(self, record: logging.LogRecord) -> str:
208
+ if "\n" in record.message:
209
+ if hasattr(record, "auto_indent"):
210
+ # Passed in from the "extra={}" kwarg on the call to logging.log().
211
+ auto_indent = self._get_auto_indent(record.auto_indent)
212
+ else:
213
+ auto_indent = self._auto_indent
214
+
215
+ if auto_indent:
216
+ lines = record.message.splitlines()
217
+ formatted = self._fmt % {**record.__dict__, "message": lines[0]}
218
+
219
+ if auto_indent < 0:
220
+ indentation = _remove_ansi_escape_sequences(formatted).find(
221
+ lines[0]
222
+ )
223
+ else:
224
+ # Optimizes logging by allowing a fixed indentation.
225
+ indentation = auto_indent
226
+ lines[0] = formatted
227
+ return ("\n" + " " * indentation).join(lines)
228
+ return self._fmt % record.__dict__
229
+
230
+
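# Illustrative usage sketch for the per-record auto_indent override
# handled above; it takes effect when pytest's logging plugin formats the
# record (e.g. with live logging enabled via log_cli).
import logging

logger = logging.getLogger(__name__)


def test_multiline_log():
    logger.warning(
        "first line\nsecond line is aligned under the first",
        extra={"auto_indent": True},
    )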
231
+ def get_option_ini(config: Config, *names: str):
232
+ for name in names:
233
+ ret = config.getoption(name) # 'default' arg won't work as expected
234
+ if ret is None:
235
+ ret = config.getini(name)
236
+ if ret:
237
+ return ret
238
+
239
+
240
+ def pytest_addoption(parser: Parser) -> None:
241
+ """Add options to control log capturing."""
242
+ group = parser.getgroup("logging")
243
+
244
+ def add_option_ini(option, dest, default=None, type=None, **kwargs):
245
+ parser.addini(
246
+ dest, default=default, type=type, help="Default value for " + option
247
+ )
248
+ group.addoption(option, dest=dest, **kwargs)
249
+
250
+ add_option_ini(
251
+ "--log-level",
252
+ dest="log_level",
253
+ default=None,
254
+ metavar="LEVEL",
255
+ help=(
256
+ "Level of messages to catch/display."
257
+ " Not set by default, so it depends on the root/parent log handler's"
258
+ ' effective level, where it is "WARNING" by default.'
259
+ ),
260
+ )
261
+ add_option_ini(
262
+ "--log-format",
263
+ dest="log_format",
264
+ default=DEFAULT_LOG_FORMAT,
265
+ help="Log format used by the logging module",
266
+ )
267
+ add_option_ini(
268
+ "--log-date-format",
269
+ dest="log_date_format",
270
+ default=DEFAULT_LOG_DATE_FORMAT,
271
+ help="Log date format used by the logging module",
272
+ )
273
+ parser.addini(
274
+ "log_cli",
275
+ default=False,
276
+ type="bool",
277
+ help='Enable log display during test run (also known as "live logging")',
278
+ )
279
+ add_option_ini(
280
+ "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level"
281
+ )
282
+ add_option_ini(
283
+ "--log-cli-format",
284
+ dest="log_cli_format",
285
+ default=None,
286
+ help="Log format used by the logging module",
287
+ )
288
+ add_option_ini(
289
+ "--log-cli-date-format",
290
+ dest="log_cli_date_format",
291
+ default=None,
292
+ help="Log date format used by the logging module",
293
+ )
294
+ add_option_ini(
295
+ "--log-file",
296
+ dest="log_file",
297
+ default=None,
298
+ help="Path to a file when logging will be written to",
299
+ )
300
+ add_option_ini(
301
+ "--log-file-mode",
302
+ dest="log_file_mode",
303
+ default="w",
304
+ choices=["w", "a"],
305
+ help="Log file open mode",
306
+ )
307
+ add_option_ini(
308
+ "--log-file-level",
309
+ dest="log_file_level",
310
+ default=None,
311
+ help="Log file logging level",
312
+ )
313
+ add_option_ini(
314
+ "--log-file-format",
315
+ dest="log_file_format",
316
+ default=None,
317
+ help="Log format used by the logging module",
318
+ )
319
+ add_option_ini(
320
+ "--log-file-date-format",
321
+ dest="log_file_date_format",
322
+ default=None,
323
+ help="Log date format used by the logging module",
324
+ )
325
+ add_option_ini(
326
+ "--log-auto-indent",
327
+ dest="log_auto_indent",
328
+ default=None,
329
+ help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.",
330
+ )
331
+ group.addoption(
332
+ "--log-disable",
333
+ action="append",
334
+ default=[],
335
+ dest="logger_disable",
336
+ help="Disable a logger by name. Can be passed multiple times.",
337
+ )
338
+
339
+
340
+ _HandlerType = TypeVar("_HandlerType", bound=logging.Handler)
341
+
342
+
343
+ # Not using @contextmanager for performance reasons.
344
+ class catching_logs(Generic[_HandlerType]):
345
+ """Context manager that prepares the whole logging machinery properly."""
346
+
347
+ __slots__ = ("handler", "level", "orig_level")
348
+
349
+ def __init__(self, handler: _HandlerType, level: int | None = None) -> None:
350
+ self.handler = handler
351
+ self.level = level
352
+
353
+ def __enter__(self) -> _HandlerType:
354
+ root_logger = logging.getLogger()
355
+ if self.level is not None:
356
+ self.handler.setLevel(self.level)
357
+ root_logger.addHandler(self.handler)
358
+ if self.level is not None:
359
+ self.orig_level = root_logger.level
360
+ root_logger.setLevel(min(self.orig_level, self.level))
361
+ return self.handler
362
+
363
+ def __exit__(
364
+ self,
365
+ exc_type: type[BaseException] | None,
366
+ exc_val: BaseException | None,
367
+ exc_tb: TracebackType | None,
368
+ ) -> None:
369
+ root_logger = logging.getLogger()
370
+ if self.level is not None:
371
+ root_logger.setLevel(self.orig_level)
372
+ root_logger.removeHandler(self.handler)
373
+
374
+
375
+ class LogCaptureHandler(logging_StreamHandler):
376
+ """A logging handler that stores log records and the log text."""
377
+
378
+ def __init__(self) -> None:
379
+ """Create a new log handler."""
380
+ super().__init__(StringIO())
381
+ self.records: list[logging.LogRecord] = []
382
+
383
+ def emit(self, record: logging.LogRecord) -> None:
384
+ """Keep the log records in a list in addition to the log text."""
385
+ self.records.append(record)
386
+ super().emit(record)
387
+
388
+ def reset(self) -> None:
389
+ self.records = []
390
+ self.stream = StringIO()
391
+
392
+ def clear(self) -> None:
393
+ self.records.clear()
394
+ self.stream = StringIO()
395
+
396
+ def handleError(self, record: logging.LogRecord) -> None:
397
+ if logging.raiseExceptions:
398
+ # Fail the test if the log message is bad (emit failed).
399
+ # The default behavior of logging is to print "Logging error"
400
+ # to stderr with the call stack and some extra details.
401
+ # pytest wants to make such mistakes visible during testing.
402
+ raise # noqa: PLE0704
403
+
404
+
405
+ @final
406
+ class LogCaptureFixture:
407
+ """Provides access and control of log capturing."""
408
+
409
+ def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None:
410
+ check_ispytest(_ispytest)
411
+ self._item = item
412
+ self._initial_handler_level: int | None = None
413
+ # Dict of log name -> log level.
414
+ self._initial_logger_levels: dict[str | None, int] = {}
415
+ self._initial_disabled_logging_level: int | None = None
416
+
417
+ def _finalize(self) -> None:
418
+ """Finalize the fixture.
419
+
420
+ This restores the log levels and the disabled logging levels changed by :meth:`set_level`.
421
+ """
422
+ # Restore log levels.
423
+ if self._initial_handler_level is not None:
424
+ self.handler.setLevel(self._initial_handler_level)
425
+ for logger_name, level in self._initial_logger_levels.items():
426
+ logger = logging.getLogger(logger_name)
427
+ logger.setLevel(level)
428
+ # Disable logging at the original disabled logging level.
429
+ if self._initial_disabled_logging_level is not None:
430
+ logging.disable(self._initial_disabled_logging_level)
431
+ self._initial_disabled_logging_level = None
432
+
433
+ @property
434
+ def handler(self) -> LogCaptureHandler:
435
+ """Get the logging handler used by the fixture."""
436
+ return self._item.stash[caplog_handler_key]
437
+
438
+ def get_records(
439
+ self, when: Literal["setup", "call", "teardown"]
440
+ ) -> list[logging.LogRecord]:
441
+ """Get the logging records for one of the possible test phases.
442
+
443
+ :param when:
444
+ Which test phase to obtain the records from.
445
+ Valid values are: "setup", "call" and "teardown".
446
+
447
+ :returns: The list of captured records at the given stage.
448
+
449
+ .. versionadded:: 3.4
450
+ """
451
+ return self._item.stash[caplog_records_key].get(when, [])
452
+
453
+ @property
454
+ def text(self) -> str:
455
+ """The formatted log text."""
456
+ return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
457
+
458
+ @property
459
+ def records(self) -> list[logging.LogRecord]:
460
+ """The list of log records."""
461
+ return self.handler.records
462
+
463
+ @property
464
+ def record_tuples(self) -> list[tuple[str, int, str]]:
465
+ """A list of a stripped down version of log records intended
466
+ for use in assertion comparison.
467
+
468
+ The format of the tuple is:
469
+
470
+ (logger_name, log_level, message)
471
+ """
472
+ return [(r.name, r.levelno, r.getMessage()) for r in self.records]
473
+
474
+ @property
475
+ def messages(self) -> list[str]:
476
+ """A list of format-interpolated log messages.
477
+
478
+ Unlike 'records', which contains the format string and parameters for
479
+ interpolation, log messages in this list are all interpolated.
480
+
481
+ Unlike 'text', which contains the output from the handler, log
482
+ messages in this list are unadorned with levels, timestamps, etc,
483
+ making exact comparisons more reliable.
484
+
485
+ Note that traceback or stack info (from :func:`logging.exception` or
486
+ the `exc_info` or `stack_info` arguments to the logging functions) is
487
+ not included, as this is added by the formatter in the handler.
488
+
489
+ .. versionadded:: 3.7
490
+ """
491
+ return [r.getMessage() for r in self.records]
492
+
493
+ def clear(self) -> None:
494
+ """Reset the list of log records and the captured log text."""
495
+ self.handler.clear()
496
+
497
+ def _force_enable_logging(
498
+ self, level: int | str, logger_obj: logging.Logger
499
+ ) -> int:
500
+ """Enable the desired logging level if the global level was disabled via ``logging.disabled``.
501
+
502
+ Only enables logging levels greater than or equal to the requested ``level``.
503
+
504
+ Does nothing if the desired ``level`` wasn't disabled.
505
+
506
+ :param level:
507
+ The logger level caplog should capture.
508
+ All logging is enabled if a non-standard logging level string is supplied.
509
+ Valid level strings are in :data:`logging._nameToLevel`.
510
+ :param logger_obj: The logger object to check.
511
+
512
+ :return: The original disabled logging level.
513
+ """
514
+ original_disable_level: int = logger_obj.manager.disable
515
+
516
+ if isinstance(level, str):
517
+ # Try to translate the level string to an int for `logging.disable()`
518
+ level = logging.getLevelName(level)
519
+
520
+ if not isinstance(level, int):
521
+ # The level provided was not valid, so just un-disable all logging.
522
+ logging.disable(logging.NOTSET)
523
+ elif not logger_obj.isEnabledFor(level):
524
+ # Each level is `10` away from other levels.
525
+ # https://docs.python.org/3/library/logging.html#logging-levels
526
+ disable_level = max(level - 10, logging.NOTSET)
527
+ logging.disable(disable_level)
528
+
529
+ return original_disable_level
530
+
531
+ def set_level(self, level: int | str, logger: str | None = None) -> None:
532
+ """Set the threshold level of a logger for the duration of a test.
533
+
534
+ Logging messages which are less severe than this level will not be captured.
535
+
536
+ .. versionchanged:: 3.4
537
+ The levels of the loggers changed by this function will be
538
+ restored to their initial values at the end of the test.
539
+
540
+ Will enable the requested logging level if it was disabled via :func:`logging.disable`.
541
+
542
+ :param level: The level.
543
+ :param logger: The logger to update. If not given, the root logger.
544
+ """
545
+ logger_obj = logging.getLogger(logger)
546
+ # Save the original log-level to restore it during teardown.
547
+ self._initial_logger_levels.setdefault(logger, logger_obj.level)
548
+ logger_obj.setLevel(level)
549
+ if self._initial_handler_level is None:
550
+ self._initial_handler_level = self.handler.level
551
+ self.handler.setLevel(level)
552
+ initial_disabled_logging_level = self._force_enable_logging(level, logger_obj)
553
+ if self._initial_disabled_logging_level is None:
554
+ self._initial_disabled_logging_level = initial_disabled_logging_level
555
+
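Typical usage, as a sketch ("myapp.db" is a hypothetical logger name):

```python
import logging

def test_set_level(caplog):
    caplog.set_level(logging.DEBUG)                    # root logger, restored on teardown
    caplog.set_level(logging.INFO, logger="myapp.db")  # a specific logger
    logging.getLogger("myapp.db").debug("dropped: below INFO for this logger")
    logging.getLogger("myapp.db").info("captured")
    assert caplog.messages == ["captured"]
```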
556
+ @contextmanager
557
+ def at_level(self, level: int | str, logger: str | None = None) -> Generator[None]:
558
+ """Context manager that sets the level for capturing of logs. After
559
+ the end of the 'with' statement the level is restored to its original
560
+ value.
561
+
562
+ Will enable the requested logging level if it was disabled via :func:`logging.disable`.
563
+
564
+ :param level: The level.
565
+ :param logger: The logger to update. If not given, the root logger.
566
+ """
567
+ logger_obj = logging.getLogger(logger)
568
+ orig_level = logger_obj.level
569
+ logger_obj.setLevel(level)
570
+ handler_orig_level = self.handler.level
571
+ self.handler.setLevel(level)
572
+ original_disable_level = self._force_enable_logging(level, logger_obj)
573
+ try:
574
+ yield
575
+ finally:
576
+ logger_obj.setLevel(orig_level)
577
+ self.handler.setLevel(handler_orig_level)
578
+ logging.disable(original_disable_level)
579
+
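A sketch showing that the level (and hence capturing) reverts when the block exits; the logger name is illustrative:

```python
import logging

def test_at_level_restores(caplog):
    logger = logging.getLogger("myapp")
    with caplog.at_level(logging.DEBUG, logger="myapp"):
        logger.debug("inside the block: captured")
    logger.debug("outside the block: dropped again")
    assert caplog.messages == ["inside the block: captured"]
```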
580
+ @contextmanager
581
+ def filtering(self, filter_: logging.Filter) -> Generator[None]:
582
+ """Context manager that temporarily adds the given filter to the caplog's
583
+ :meth:`handler` for the 'with' statement block, and removes that filter at the
584
+ end of the block.
585
+
586
+ :param filter_: A custom :class:`logging.Filter` object.
587
+
588
+ .. versionadded:: 7.5
589
+ """
590
+ self.handler.addFilter(filter_)
591
+ try:
592
+ yield
593
+ finally:
594
+ self.handler.removeFilter(filter_)
595
+
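A minimal sketch with a custom filter (the filter class is illustrative):

```python
import logging

class DropDebug(logging.Filter):
    """Reject DEBUG records; anything above passes."""
    def filter(self, record: logging.LogRecord) -> bool:
        return record.levelno > logging.DEBUG

def test_filtering(caplog):
    with caplog.at_level(logging.DEBUG):
        with caplog.filtering(DropDebug()):
            logging.getLogger().debug("dropped by the filter")
            logging.getLogger().info("kept")
    assert caplog.messages == ["kept"]
```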
596
+
597
+ @fixture
598
+ def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture]:
599
+ """Access and control log capturing.
600
+
601
+ Captured logs are available through the following properties/methods::
602
+
603
+ * caplog.messages -> list of format-interpolated log messages
604
+ * caplog.text -> string containing formatted log output
605
+ * caplog.records -> list of logging.LogRecord instances
606
+ * caplog.record_tuples -> list of (logger_name, level, message) tuples
607
+ * caplog.clear() -> clear captured records and formatted log output string
608
+ """
609
+ result = LogCaptureFixture(request.node, _ispytest=True)
610
+ yield result
611
+ result._finalize()
612
+
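Putting the fixture's surface together in one hedged example (`get_records` is the per-phase accessor defined earlier in this class):

```python
import logging

def test_caplog_surface(caplog):
    logging.getLogger().warning("first")
    assert caplog.messages == ["first"]
    caplog.clear()  # drop captured records and formatted text
    logging.getLogger().warning("second")
    assert caplog.messages == ["second"]
    # Records are also grouped by test phase; nothing was logged during setup.
    assert caplog.get_records("setup") == []
```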
613
+
614
+ def get_log_level_for_setting(config: Config, *setting_names: str) -> int | None:
615
+ for setting_name in setting_names:
616
+ log_level = config.getoption(setting_name)
617
+ if log_level is None:
618
+ log_level = config.getini(setting_name)
619
+ if log_level:
620
+ break
621
+ else:
622
+ return None
623
+
624
+ if isinstance(log_level, str):
625
+ log_level = log_level.upper()
626
+ try:
627
+ return int(getattr(logging, log_level, log_level))
628
+ except ValueError as e:
629
+ # Python logging does not recognise this as a logging level
630
+ raise UsageError(
631
+ f"'{log_level}' is not recognized as a logging level name for "
632
+ f"'{setting_name}'. Please consider passing the "
633
+ "logging level num instead."
634
+ ) from e
635
+
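The name-to-number resolution above boils down to the following standalone sketch (not the real helper):

```python
import logging

def resolve_level(log_level: str) -> int:
    """Mirror of the lookup above: level names resolve through the logging
    module's attributes, numeric strings fall through to int()."""
    log_level = log_level.upper()
    return int(getattr(logging, log_level, log_level))

assert resolve_level("info") == logging.INFO  # 20
assert resolve_level("10") == logging.DEBUG   # numeric strings also work
```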
636
+
637
+ # run after terminalreporter/capturemanager are configured
638
+ @hookimpl(trylast=True)
639
+ def pytest_configure(config: Config) -> None:
640
+ config.pluginmanager.register(LoggingPlugin(config), "logging-plugin")
641
+
642
+
643
+ class LoggingPlugin:
644
+ """Attaches to the logging module and captures log messages for each test."""
645
+
646
+ def __init__(self, config: Config) -> None:
647
+ """Create a new plugin to capture log messages.
648
+
649
+ The formatter can be safely shared across all handlers so
650
+ create a single one for the entire test session here.
651
+ """
652
+ self._config = config
653
+
654
+ # Report logging.
655
+ self.formatter = self._create_formatter(
656
+ get_option_ini(config, "log_format"),
657
+ get_option_ini(config, "log_date_format"),
658
+ get_option_ini(config, "log_auto_indent"),
659
+ )
660
+ self.log_level = get_log_level_for_setting(config, "log_level")
661
+ self.caplog_handler = LogCaptureHandler()
662
+ self.caplog_handler.setFormatter(self.formatter)
663
+ self.report_handler = LogCaptureHandler()
664
+ self.report_handler.setFormatter(self.formatter)
665
+
666
+ # File logging.
667
+ self.log_file_level = get_log_level_for_setting(
668
+ config, "log_file_level", "log_level"
669
+ )
670
+ log_file = get_option_ini(config, "log_file") or os.devnull
671
+ if log_file != os.devnull:
672
+ directory = os.path.dirname(os.path.abspath(log_file))
673
+ if not os.path.isdir(directory):
674
+ os.makedirs(directory)
675
+
676
+ self.log_file_mode = get_option_ini(config, "log_file_mode") or "w"
677
+ self.log_file_handler = _FileHandler(
678
+ log_file, mode=self.log_file_mode, encoding="UTF-8"
679
+ )
680
+ log_file_format = get_option_ini(config, "log_file_format", "log_format")
681
+ log_file_date_format = get_option_ini(
682
+ config, "log_file_date_format", "log_date_format"
683
+ )
684
+
685
+ log_file_formatter = DatetimeFormatter(
686
+ log_file_format, datefmt=log_file_date_format
687
+ )
688
+ self.log_file_handler.setFormatter(log_file_formatter)
689
+
690
+ # CLI/live logging.
691
+ self.log_cli_level = get_log_level_for_setting(
692
+ config, "log_cli_level", "log_level"
693
+ )
694
+ if self._log_cli_enabled():
695
+ terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
696
+ # Guaranteed by `_log_cli_enabled()`.
697
+ assert terminal_reporter is not None
698
+ capture_manager = config.pluginmanager.get_plugin("capturemanager")
699
+ # if capturemanager plugin is disabled, live logging still works.
700
+ self.log_cli_handler: (
701
+ _LiveLoggingStreamHandler | _LiveLoggingNullHandler
702
+ ) = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
703
+ else:
704
+ self.log_cli_handler = _LiveLoggingNullHandler()
705
+ log_cli_formatter = self._create_formatter(
706
+ get_option_ini(config, "log_cli_format", "log_format"),
707
+ get_option_ini(config, "log_cli_date_format", "log_date_format"),
708
+ get_option_ini(config, "log_auto_indent"),
709
+ )
710
+ self.log_cli_handler.setFormatter(log_cli_formatter)
711
+ self._disable_loggers(loggers_to_disable=config.option.logger_disable)
712
+
713
+ def _disable_loggers(self, loggers_to_disable: list[str]) -> None:
714
+ if not loggers_to_disable:
715
+ return
716
+
717
+ for name in loggers_to_disable:
718
+ logger = logging.getLogger(name)
719
+ logger.disabled = True
720
+
721
+ def _create_formatter(self, log_format, log_date_format, auto_indent):
722
+ # Color option doesn't exist if terminal plugin is disabled.
723
+ color = getattr(self._config.option, "color", "no")
724
+ if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
725
+ log_format
726
+ ):
727
+ formatter: logging.Formatter = ColoredLevelFormatter(
728
+ create_terminal_writer(self._config), log_format, log_date_format
729
+ )
730
+ else:
731
+ formatter = DatetimeFormatter(log_format, log_date_format)
732
+
733
+ formatter._style = PercentStyleMultiline(
734
+ formatter._style._fmt, auto_indent=auto_indent
735
+ )
736
+
737
+ return formatter
738
+
739
+ def set_log_path(self, fname: str) -> None:
740
+ """Set the filename parameter for Logging.FileHandler().
741
+
742
+ Creates parent directory if it does not exist.
743
+
744
+ .. warning::
745
+ This is an experimental API.
746
+ """
747
+ fpath = Path(fname)
748
+
749
+ if not fpath.is_absolute():
750
+ fpath = self._config.rootpath / fpath
751
+
752
+ if not fpath.parent.exists():
753
+ fpath.parent.mkdir(exist_ok=True, parents=True)
754
+
755
+ # https://github.com/python/mypy/issues/11193
756
+ stream: io.TextIOWrapper = fpath.open(mode=self.log_file_mode, encoding="UTF-8") # type: ignore[assignment]
757
+ old_stream = self.log_file_handler.setStream(stream)
758
+ if old_stream:
759
+ old_stream.close()
760
+
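A hedged conftest.py sketch of the common use for this experimental API, writing one log file per test; the "test-logs" directory name is an assumption:

```python
from pathlib import Path

import pytest

@pytest.hookimpl(wrapper=True, tryfirst=True)
def pytest_runtest_setup(item):
    # "logging-plugin" is the registration name used by pytest_configure above.
    plugin = item.config.pluginmanager.get_plugin("logging-plugin")
    if plugin is not None:
        plugin.set_log_path(str(Path("test-logs") / f"{item.name}.log"))
    return (yield)
```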
761
+ def _log_cli_enabled(self) -> bool:
762
+ """Return whether live logging is enabled."""
763
+ enabled = self._config.getoption(
764
+ "--log-cli-level"
765
+ ) is not None or self._config.getini("log_cli")
766
+ if not enabled:
767
+ return False
768
+
769
+ terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter")
770
+ if terminal_reporter is None:
771
+ # terminal reporter is disabled e.g. by pytest-xdist.
772
+ return False
773
+
774
+ return True
775
+
776
+ @hookimpl(wrapper=True, tryfirst=True)
777
+ def pytest_sessionstart(self) -> Generator[None]:
778
+ self.log_cli_handler.set_when("sessionstart")
779
+
780
+ with catching_logs(self.log_cli_handler, level=self.log_cli_level):
781
+ with catching_logs(self.log_file_handler, level=self.log_file_level):
782
+ return (yield)
783
+
784
+ @hookimpl(wrapper=True, tryfirst=True)
785
+ def pytest_collection(self) -> Generator[None]:
786
+ self.log_cli_handler.set_when("collection")
787
+
788
+ with catching_logs(self.log_cli_handler, level=self.log_cli_level):
789
+ with catching_logs(self.log_file_handler, level=self.log_file_level):
790
+ return (yield)
791
+
792
+ @hookimpl(wrapper=True)
793
+ def pytest_runtestloop(self, session: Session) -> Generator[None, object, object]:
794
+ if session.config.option.collectonly:
795
+ return (yield)
796
+
797
+ if self._log_cli_enabled() and self._config.get_verbosity() < 1:
798
+ # The verbose flag is needed to avoid messy test progress output.
799
+ self._config.option.verbose = 1
800
+
801
+ with catching_logs(self.log_cli_handler, level=self.log_cli_level):
802
+ with catching_logs(self.log_file_handler, level=self.log_file_level):
803
+ return (yield) # Run all the tests.
804
+
805
+ @hookimpl
806
+ def pytest_runtest_logstart(self) -> None:
807
+ self.log_cli_handler.reset()
808
+ self.log_cli_handler.set_when("start")
809
+
810
+ @hookimpl
811
+ def pytest_runtest_logreport(self) -> None:
812
+ self.log_cli_handler.set_when("logreport")
813
+
814
+ def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None]:
815
+ """Implement the internals of the pytest_runtest_xxx() hooks."""
816
+ with catching_logs(
817
+ self.caplog_handler,
818
+ level=self.log_level,
819
+ ) as caplog_handler, catching_logs(
820
+ self.report_handler,
821
+ level=self.log_level,
822
+ ) as report_handler:
823
+ caplog_handler.reset()
824
+ report_handler.reset()
825
+ item.stash[caplog_records_key][when] = caplog_handler.records
826
+ item.stash[caplog_handler_key] = caplog_handler
827
+
828
+ try:
829
+ yield
830
+ finally:
831
+ log = report_handler.stream.getvalue().strip()
832
+ item.add_report_section(when, "log", log)
833
+
834
+ @hookimpl(wrapper=True)
835
+ def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None]:
836
+ self.log_cli_handler.set_when("setup")
837
+
838
+ empty: dict[str, list[logging.LogRecord]] = {}
839
+ item.stash[caplog_records_key] = empty
840
+ yield from self._runtest_for(item, "setup")
841
+
842
+ @hookimpl(wrapper=True)
843
+ def pytest_runtest_call(self, item: nodes.Item) -> Generator[None]:
844
+ self.log_cli_handler.set_when("call")
845
+
846
+ yield from self._runtest_for(item, "call")
847
+
848
+ @hookimpl(wrapper=True)
849
+ def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None]:
850
+ self.log_cli_handler.set_when("teardown")
851
+
852
+ try:
853
+ yield from self._runtest_for(item, "teardown")
854
+ finally:
855
+ del item.stash[caplog_records_key]
856
+ del item.stash[caplog_handler_key]
857
+
858
+ @hookimpl
859
+ def pytest_runtest_logfinish(self) -> None:
860
+ self.log_cli_handler.set_when("finish")
861
+
862
+ @hookimpl(wrapper=True, tryfirst=True)
863
+ def pytest_sessionfinish(self) -> Generator[None]:
864
+ self.log_cli_handler.set_when("sessionfinish")
865
+
866
+ with catching_logs(self.log_cli_handler, level=self.log_cli_level):
867
+ with catching_logs(self.log_file_handler, level=self.log_file_level):
868
+ return (yield)
869
+
870
+ @hookimpl
871
+ def pytest_unconfigure(self) -> None:
872
+ # Close the FileHandler explicitly.
873
+ # (logging.shutdown might have lost the weakref?!)
874
+ self.log_file_handler.close()
875
+
876
+
877
+ class _FileHandler(logging.FileHandler):
878
+ """A logging FileHandler with pytest tweaks."""
879
+
880
+ def handleError(self, record: logging.LogRecord) -> None:
881
+ # Handled by LogCaptureHandler.
882
+ pass
883
+
884
+
885
+ class _LiveLoggingStreamHandler(logging_StreamHandler):
886
+ """A logging StreamHandler used by the live logging feature: it will
887
+ write a newline before the first log message in each test.
888
+
889
+ During live logging we must also explicitly disable stdout/stderr
890
+ capturing, otherwise it will get captured and won't appear in the
891
+ terminal.
892
+ """
893
+
894
+ # Officially stream needs to be an IO[str], but TerminalReporter
895
+ # isn't. So force it.
896
+ stream: TerminalReporter = None # type: ignore
897
+
898
+ def __init__(
899
+ self,
900
+ terminal_reporter: TerminalReporter,
901
+ capture_manager: CaptureManager | None,
902
+ ) -> None:
903
+ super().__init__(stream=terminal_reporter) # type: ignore[arg-type]
904
+ self.capture_manager = capture_manager
905
+ self.reset()
906
+ self.set_when(None)
907
+ self._test_outcome_written = False
908
+
909
+ def reset(self) -> None:
910
+ """Reset the handler; should be called before the start of each test."""
911
+ self._first_record_emitted = False
912
+
913
+ def set_when(self, when: str | None) -> None:
914
+ """Prepare for the given test phase (setup/call/teardown)."""
915
+ self._when = when
916
+ self._section_name_shown = False
917
+ if when == "start":
918
+ self._test_outcome_written = False
919
+
920
+ def emit(self, record: logging.LogRecord) -> None:
921
+ ctx_manager = (
922
+ self.capture_manager.global_and_fixture_disabled()
923
+ if self.capture_manager
924
+ else nullcontext()
925
+ )
926
+ with ctx_manager:
927
+ if not self._first_record_emitted:
928
+ self.stream.write("\n")
929
+ self._first_record_emitted = True
930
+ elif self._when in ("teardown", "finish"):
931
+ if not self._test_outcome_written:
932
+ self._test_outcome_written = True
933
+ self.stream.write("\n")
934
+ if not self._section_name_shown and self._when:
935
+ self.stream.section("live log " + self._when, sep="-", bold=True)
936
+ self._section_name_shown = True
937
+ super().emit(record)
938
+
939
+ def handleError(self, record: logging.LogRecord) -> None:
940
+ # Handled by LogCaptureHandler.
941
+ pass
942
+
943
+
944
+ class _LiveLoggingNullHandler(logging.NullHandler):
945
+ """A logging handler used when live logging is disabled."""
946
+
947
+ def reset(self) -> None:
948
+ pass
949
+
950
+ def set_when(self, when: str) -> None:
951
+ pass
952
+
953
+ def handleError(self, record: logging.LogRecord) -> None:
954
+ # Handled by LogCaptureHandler.
955
+ pass
.venv/lib/python3.11/site-packages/_pytest/nodes.py ADDED
@@ -0,0 +1,766 @@
1
+ # mypy: allow-untyped-defs
2
+ from __future__ import annotations
3
+
4
+ import abc
5
+ from functools import cached_property
6
+ from inspect import signature
7
+ import os
8
+ import pathlib
9
+ from pathlib import Path
10
+ from typing import Any
11
+ from typing import Callable
12
+ from typing import cast
13
+ from typing import Iterable
14
+ from typing import Iterator
15
+ from typing import MutableMapping
16
+ from typing import NoReturn
17
+ from typing import overload
18
+ from typing import TYPE_CHECKING
19
+ from typing import TypeVar
20
+ import warnings
21
+
22
+ import pluggy
23
+
24
+ import _pytest._code
25
+ from _pytest._code import getfslineno
26
+ from _pytest._code.code import ExceptionInfo
27
+ from _pytest._code.code import TerminalRepr
28
+ from _pytest._code.code import Traceback
29
+ from _pytest._code.code import TracebackStyle
30
+ from _pytest.compat import LEGACY_PATH
31
+ from _pytest.config import Config
32
+ from _pytest.config import ConftestImportFailure
33
+ from _pytest.config.compat import _check_path
34
+ from _pytest.deprecated import NODE_CTOR_FSPATH_ARG
35
+ from _pytest.mark.structures import Mark
36
+ from _pytest.mark.structures import MarkDecorator
37
+ from _pytest.mark.structures import NodeKeywords
38
+ from _pytest.outcomes import fail
39
+ from _pytest.pathlib import absolutepath
40
+ from _pytest.pathlib import commonpath
41
+ from _pytest.stash import Stash
42
+ from _pytest.warning_types import PytestWarning
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ from typing_extensions import Self
47
+
48
+ # Imported here due to circular import.
49
+ from _pytest.main import Session
50
+
51
+
52
+ SEP = "/"
53
+
54
+ tracebackcutdir = Path(_pytest.__file__).parent
55
+
56
+
57
+ _T = TypeVar("_T")
58
+
59
+
60
+ def _imply_path(
61
+ node_type: type[Node],
62
+ path: Path | None,
63
+ fspath: LEGACY_PATH | None,
64
+ ) -> Path:
65
+ if fspath is not None:
66
+ warnings.warn(
67
+ NODE_CTOR_FSPATH_ARG.format(
68
+ node_type_name=node_type.__name__,
69
+ ),
70
+ stacklevel=6,
71
+ )
72
+ if path is not None:
73
+ if fspath is not None:
74
+ _check_path(path, fspath)
75
+ return path
76
+ else:
77
+ assert fspath is not None
78
+ return Path(fspath)
79
+
80
+
81
+ _NodeType = TypeVar("_NodeType", bound="Node")
82
+
83
+
84
+ class NodeMeta(abc.ABCMeta):
85
+ """Metaclass used by :class:`Node` to enforce that direct construction raises
86
+ :class:`Failed`.
87
+
88
+ This behaviour supports the indirection introduced with :meth:`Node.from_parent`,
89
+ the named constructor to be used instead of direct construction. The design
90
+ decision to enforce indirection with :class:`NodeMeta` was made as a
91
+ temporary aid for refactoring the collection tree, which was diagnosed to
92
+ have :class:`Node` objects whose creational patterns were overly entangled.
93
+ Once the refactoring is complete, this metaclass can be removed.
94
+
95
+ See https://github.com/pytest-dev/pytest/projects/3 for an overview of the
96
+ progress on detangling the :class:`Node` classes.
97
+ """
98
+
99
+ def __call__(cls, *k, **kw) -> NoReturn:
100
+ msg = (
101
+ "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n"
102
+ "See "
103
+ "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent"
104
+ " for more details."
105
+ ).format(name=f"{cls.__module__}.{cls.__name__}")
106
+ fail(msg, pytrace=False)
107
+
108
+ def _create(cls: type[_T], *k, **kw) -> _T:
109
+ try:
110
+ return super().__call__(*k, **kw) # type: ignore[no-any-return,misc]
111
+ except TypeError:
112
+ sig = signature(getattr(cls, "__init__"))
113
+ known_kw = {k: v for k, v in kw.items() if k in sig.parameters}
114
+ from .warning_types import PytestDeprecationWarning
115
+
116
+ warnings.warn(
117
+ PytestDeprecationWarning(
118
+ f"{cls} is not using a cooperative constructor and only takes {set(known_kw)}.\n"
119
+ "See https://docs.pytest.org/en/stable/deprecations.html"
120
+ "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs "
121
+ "for more details."
122
+ )
123
+ )
124
+
125
+ return super().__call__(*k, **known_kw) # type: ignore[no-any-return,misc]
126
+
127
+
128
+ class Node(abc.ABC, metaclass=NodeMeta):
129
+ r"""Base class of :class:`Collector` and :class:`Item`, the components of
130
+ the test collection tree.
131
+
132
+ ``Collector``\'s are the internal nodes of the tree, and ``Item``\'s are the
133
+ leaf nodes.
134
+ """
135
+
136
+ # Implemented in the legacypath plugin.
137
+ #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage
138
+ #: for methods not migrated to ``pathlib.Path`` yet, such as
139
+ #: :meth:`Item.reportinfo <pytest.Item.reportinfo>`. Will be deprecated in
140
+ #: a future release, prefer using :attr:`path` instead.
141
+ fspath: LEGACY_PATH
142
+
143
+ # Use __slots__ to make attribute access faster.
144
+ # Note that __dict__ is still available.
145
+ __slots__ = (
146
+ "name",
147
+ "parent",
148
+ "config",
149
+ "session",
150
+ "path",
151
+ "_nodeid",
152
+ "_store",
153
+ "__dict__",
154
+ )
155
+
156
+ def __init__(
157
+ self,
158
+ name: str,
159
+ parent: Node | None = None,
160
+ config: Config | None = None,
161
+ session: Session | None = None,
162
+ fspath: LEGACY_PATH | None = None,
163
+ path: Path | None = None,
164
+ nodeid: str | None = None,
165
+ ) -> None:
166
+ #: A unique name within the scope of the parent node.
167
+ self.name: str = name
168
+
169
+ #: The parent collector node.
170
+ self.parent = parent
171
+
172
+ if config:
173
+ #: The pytest config object.
174
+ self.config: Config = config
175
+ else:
176
+ if not parent:
177
+ raise TypeError("config or parent must be provided")
178
+ self.config = parent.config
179
+
180
+ if session:
181
+ #: The pytest session this node is part of.
182
+ self.session: Session = session
183
+ else:
184
+ if not parent:
185
+ raise TypeError("session or parent must be provided")
186
+ self.session = parent.session
187
+
188
+ if path is None and fspath is None:
189
+ path = getattr(parent, "path", None)
190
+ #: Filesystem path where this node was collected from (can be None).
191
+ self.path: pathlib.Path = _imply_path(type(self), path, fspath=fspath)
192
+
193
+ # The explicit annotation is to avoid publicly exposing NodeKeywords.
194
+ #: Keywords/markers collected from all scopes.
195
+ self.keywords: MutableMapping[str, Any] = NodeKeywords(self)
196
+
197
+ #: The marker objects belonging to this node.
198
+ self.own_markers: list[Mark] = []
199
+
200
+ #: Allow adding of extra keywords to use for matching.
201
+ self.extra_keyword_matches: set[str] = set()
202
+
203
+ if nodeid is not None:
204
+ assert "::()" not in nodeid
205
+ self._nodeid = nodeid
206
+ else:
207
+ if not self.parent:
208
+ raise TypeError("nodeid or parent must be provided")
209
+ self._nodeid = self.parent.nodeid + "::" + self.name
210
+
211
+ #: A place where plugins can store information on the node for their
212
+ #: own use.
213
+ self.stash: Stash = Stash()
214
+ # Deprecated alias. Was never public. Can be removed in a few releases.
215
+ self._store = self.stash
216
+
217
+ @classmethod
218
+ def from_parent(cls, parent: Node, **kw) -> Self:
219
+ """Public constructor for Nodes.
220
+
221
+ This indirection was introduced in order to enable removing
222
+ the fragile logic from the node constructors.
223
+
224
+ Subclasses can use ``super().from_parent(...)`` when overriding the
225
+ construction.
226
+
227
+ :param parent: The parent node of this Node.
228
+ """
229
+ if "config" in kw:
230
+ raise TypeError("config is not a valid argument for from_parent")
231
+ if "session" in kw:
232
+ raise TypeError("session is not a valid argument for from_parent")
233
+ return cls._create(parent=parent, **kw)
234
+
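A minimal sketch of a subclass cooperating with `from_parent`; the class and its extra `answer` parameter are hypothetical:

```python
import pytest

class MyItem(pytest.Item):
    def __init__(self, *, answer: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.answer = answer

    @classmethod
    def from_parent(cls, parent, *, answer: int, **kw):
        # Route the extra argument through the named constructor.
        return super().from_parent(parent=parent, answer=answer, **kw)

    def runtest(self) -> None:
        assert self.answer == 42

# Construction goes through the classmethod, never the bare constructor:
# item = MyItem.from_parent(parent, name="check", answer=42)
```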
235
+ @property
236
+ def ihook(self) -> pluggy.HookRelay:
237
+ """fspath-sensitive hook proxy used to call pytest hooks."""
238
+ return self.session.gethookproxy(self.path)
239
+
240
+ def __repr__(self) -> str:
241
+ return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None))
242
+
243
+ def warn(self, warning: Warning) -> None:
244
+ """Issue a warning for this Node.
245
+
246
+ Warnings will be displayed after the test session, unless explicitly suppressed.
247
+
248
+ :param Warning warning:
249
+ The warning instance to issue.
250
+
251
+ :raises ValueError: If ``warning`` instance is not a subclass of Warning.
252
+
253
+ Example usage:
254
+
255
+ .. code-block:: python
256
+
257
+ node.warn(PytestWarning("some message"))
258
+ node.warn(UserWarning("some message"))
259
+
260
+ .. versionchanged:: 6.2
261
+ Any subclass of :class:`Warning` is now accepted, rather than only
262
+ :class:`PytestWarning <pytest.PytestWarning>` subclasses.
263
+ """
264
+ # enforce type checks here to avoid getting a generic type error later otherwise.
265
+ if not isinstance(warning, Warning):
266
+ raise ValueError(
267
+ f"warning must be an instance of Warning or subclass, got {warning!r}"
268
+ )
269
+ path, lineno = get_fslocation_from_item(self)
270
+ assert lineno is not None
271
+ warnings.warn_explicit(
272
+ warning,
273
+ category=None,
274
+ filename=str(path),
275
+ lineno=lineno + 1,
276
+ )
277
+
278
+ # Methods for ordering nodes.
279
+
280
+ @property
281
+ def nodeid(self) -> str:
282
+ """A ::-separated string denoting its collection tree address."""
283
+ return self._nodeid
284
+
285
+ def __hash__(self) -> int:
286
+ return hash(self._nodeid)
287
+
288
+ def setup(self) -> None:
289
+ pass
290
+
291
+ def teardown(self) -> None:
292
+ pass
293
+
294
+ def iter_parents(self) -> Iterator[Node]:
295
+ """Iterate over all parent collectors starting from and including self
296
+ up to the root of the collection tree.
297
+
298
+ .. versionadded:: 8.1
299
+ """
300
+ parent: Node | None = self
301
+ while parent is not None:
302
+ yield parent
303
+ parent = parent.parent
304
+
305
+ def listchain(self) -> list[Node]:
306
+ """Return a list of all parent collectors starting from the root of the
307
+ collection tree down to and including self."""
308
+ chain = []
309
+ item: Node | None = self
310
+ while item is not None:
311
+ chain.append(item)
312
+ item = item.parent
313
+ chain.reverse()
314
+ return chain
315
+
316
+ def add_marker(self, marker: str | MarkDecorator, append: bool = True) -> None:
317
+ """Dynamically add a marker object to the node.
318
+
319
+ :param marker:
320
+ The marker.
321
+ :param append:
322
+ Whether to append the marker, or prepend it.
323
+ """
324
+ from _pytest.mark import MARK_GEN
325
+
326
+ if isinstance(marker, MarkDecorator):
327
+ marker_ = marker
328
+ elif isinstance(marker, str):
329
+ marker_ = getattr(MARK_GEN, marker)
330
+ else:
331
+ raise ValueError("is not a string or pytest.mark.* Marker")
332
+ self.keywords[marker_.name] = marker_
333
+ if append:
334
+ self.own_markers.append(marker_.mark)
335
+ else:
336
+ self.own_markers.insert(0, marker_.mark)
337
+
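A sketch of dynamic marking from a conftest.py hook; the marker names and nodeid check are illustrative (markers should also be registered in the project's configuration):

```python
import pytest

def pytest_collection_modifyitems(items):
    for item in items:
        if "integration" in item.nodeid:
            item.add_marker(pytest.mark.slow)             # append
            item.add_marker("integration", append=False)  # prepend, by name
```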
338
+ def iter_markers(self, name: str | None = None) -> Iterator[Mark]:
339
+ """Iterate over all markers of the node.
340
+
341
+ :param name: If given, filter the results by the name attribute.
342
+ :returns: An iterator of the markers of the node.
343
+ """
344
+ return (x[1] for x in self.iter_markers_with_node(name=name))
345
+
346
+ def iter_markers_with_node(
347
+ self, name: str | None = None
348
+ ) -> Iterator[tuple[Node, Mark]]:
349
+ """Iterate over all markers of the node.
350
+
351
+ :param name: If given, filter the results by the name attribute.
352
+ :returns: An iterator of (node, mark) tuples.
353
+ """
354
+ for node in self.iter_parents():
355
+ for mark in node.own_markers:
356
+ if name is None or getattr(mark, "name", None) == name:
357
+ yield node, mark
358
+
359
+ @overload
360
+ def get_closest_marker(self, name: str) -> Mark | None: ...
361
+
362
+ @overload
363
+ def get_closest_marker(self, name: str, default: Mark) -> Mark: ...
364
+
365
+ def get_closest_marker(self, name: str, default: Mark | None = None) -> Mark | None:
366
+ """Return the first marker matching the name, from closest (for
367
+ example function) to farther level (for example module level).
368
+
369
+ :param default: Fallback return value if no marker was found.
370
+ :param name: Name to filter by.
371
+ """
372
+ return next(self.iter_markers(name=name), default)
373
+
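A hedged sketch of reading the closest marker in a hook; the "timeout" marker name is hypothetical, and a function-level marker wins over a class- or module-level one:

```python
def pytest_runtest_setup(item):
    marker = item.get_closest_marker("timeout")
    if marker is not None:
        # Assumes the marker was applied as e.g. @pytest.mark.timeout(5).
        seconds = marker.args[0]
        print(f"{item.nodeid} requested a {seconds}s timeout")
```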
374
+ def listextrakeywords(self) -> set[str]:
375
+ """Return a set of all extra keywords in self and any parents."""
376
+ extra_keywords: set[str] = set()
377
+ for item in self.listchain():
378
+ extra_keywords.update(item.extra_keyword_matches)
379
+ return extra_keywords
380
+
381
+ def listnames(self) -> list[str]:
382
+ return [x.name for x in self.listchain()]
383
+
384
+ def addfinalizer(self, fin: Callable[[], object]) -> None:
385
+ """Register a function to be called without arguments when this node is
386
+ finalized.
387
+
388
+ This method can only be called when this node is active
389
+ in a setup chain, for example during self.setup().
390
+ """
391
+ self.session._setupstate.addfinalizer(fin, self)
392
+
393
+ def getparent(self, cls: type[_NodeType]) -> _NodeType | None:
394
+ """Get the closest parent node (including self) which is an instance of
395
+ the given class.
396
+
397
+ :param cls: The node class to search for.
398
+ :returns: The node, if found.
399
+ """
400
+ for node in self.iter_parents():
401
+ if isinstance(node, cls):
402
+ return node
403
+ return None
404
+
405
+ def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
406
+ return excinfo.traceback
407
+
408
+ def _repr_failure_py(
409
+ self,
410
+ excinfo: ExceptionInfo[BaseException],
411
+ style: TracebackStyle | None = None,
412
+ ) -> TerminalRepr:
413
+ from _pytest.fixtures import FixtureLookupError
414
+
415
+ if isinstance(excinfo.value, ConftestImportFailure):
416
+ excinfo = ExceptionInfo.from_exception(excinfo.value.cause)
417
+ if isinstance(excinfo.value, fail.Exception):
418
+ if not excinfo.value.pytrace:
419
+ style = "value"
420
+ if isinstance(excinfo.value, FixtureLookupError):
421
+ return excinfo.value.formatrepr()
422
+
423
+ tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback]
424
+ if self.config.getoption("fulltrace", False):
425
+ style = "long"
426
+ tbfilter = False
427
+ else:
428
+ tbfilter = self._traceback_filter
429
+ if style == "auto":
430
+ style = "long"
431
+ # XXX should excinfo.getrepr record all data and toterminal() process it?
432
+ if style is None:
433
+ if self.config.getoption("tbstyle", "auto") == "short":
434
+ style = "short"
435
+ else:
436
+ style = "long"
437
+
438
+ if self.config.get_verbosity() > 1:
439
+ truncate_locals = False
440
+ else:
441
+ truncate_locals = True
442
+
443
+ truncate_args = False if self.config.get_verbosity() > 2 else True
444
+
445
+ # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False.
446
+ # It is possible for a fixture/test to change the CWD while this code runs, which
447
+ # would then result in the user seeing confusing paths in the failure message.
448
+ # To fix this, if the CWD changed, always display the full absolute path.
449
+ # It will be better to just always display paths relative to invocation_dir, but
450
+ # this requires a lot of plumbing (#6428).
451
+ try:
452
+ abspath = Path(os.getcwd()) != self.config.invocation_params.dir
453
+ except OSError:
454
+ abspath = True
455
+
456
+ return excinfo.getrepr(
457
+ funcargs=True,
458
+ abspath=abspath,
459
+ showlocals=self.config.getoption("showlocals", False),
460
+ style=style,
461
+ tbfilter=tbfilter,
462
+ truncate_locals=truncate_locals,
463
+ truncate_args=truncate_args,
464
+ )
465
+
466
+ def repr_failure(
467
+ self,
468
+ excinfo: ExceptionInfo[BaseException],
469
+ style: TracebackStyle | None = None,
470
+ ) -> str | TerminalRepr:
471
+ """Return a representation of a collection or test failure.
472
+
473
+ .. seealso:: :ref:`non-python tests`
474
+
475
+ :param excinfo: Exception information for the failure.
476
+ """
477
+ return self._repr_failure_py(excinfo, style)
478
+
479
+
480
+ def get_fslocation_from_item(node: Node) -> tuple[str | Path, int | None]:
481
+ """Try to extract the actual location from a node, depending on available attributes:
482
+
483
+ * "location": a pair (path, lineno)
484
+ * "obj": a Python object that the node wraps.
485
+ * "path": just a path
486
+
487
+ :rtype: A tuple of (str|Path, int) with filename and 0-based line number.
488
+ """
489
+ # See Item.location.
490
+ location: tuple[str, int | None, str] | None = getattr(node, "location", None)
491
+ if location is not None:
492
+ return location[:2]
493
+ obj = getattr(node, "obj", None)
494
+ if obj is not None:
495
+ return getfslineno(obj)
496
+ return getattr(node, "path", "unknown location"), -1
497
+
498
+
499
+ class Collector(Node, abc.ABC):
500
+ """Base class of all collectors.
501
+
502
+ Collectors create children through `collect()` and thus iteratively build
503
+ the collection tree.
504
+ """
505
+
506
+ class CollectError(Exception):
507
+ """An error during collection, contains a custom message."""
508
+
509
+ @abc.abstractmethod
510
+ def collect(self) -> Iterable[Item | Collector]:
511
+ """Collect children (items and collectors) for this collector."""
512
+ raise NotImplementedError("abstract")
513
+
514
+ # TODO: This omits the style= parameter which breaks Liskov Substitution.
515
+ def repr_failure( # type: ignore[override]
516
+ self, excinfo: ExceptionInfo[BaseException]
517
+ ) -> str | TerminalRepr:
518
+ """Return a representation of a collection failure.
519
+
520
+ :param excinfo: Exception information for the failure.
521
+ """
522
+ if isinstance(excinfo.value, self.CollectError) and not self.config.getoption(
523
+ "fulltrace", False
524
+ ):
525
+ exc = excinfo.value
526
+ return str(exc.args[0])
527
+
528
+ # Respect explicit tbstyle option, but default to "short"
529
+ # (_repr_failure_py uses "long" with "fulltrace" option always).
530
+ tbstyle = self.config.getoption("tbstyle", "auto")
531
+ if tbstyle == "auto":
532
+ tbstyle = "short"
533
+
534
+ return self._repr_failure_py(excinfo, style=tbstyle)
535
+
536
+ def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
537
+ if hasattr(self, "path"):
538
+ traceback = excinfo.traceback
539
+ ntraceback = traceback.cut(path=self.path)
540
+ if ntraceback == traceback:
541
+ ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
542
+ return ntraceback.filter(excinfo)
543
+ return excinfo.traceback
544
+
545
+
546
+ def _check_initialpaths_for_relpath(session: Session, path: Path) -> str | None:
547
+ for initial_path in session._initialpaths:
548
+ if commonpath(path, initial_path) == initial_path:
549
+ rel = str(path.relative_to(initial_path))
550
+ return "" if rel == "." else rel
551
+ return None
552
+
553
+
554
+ class FSCollector(Collector, abc.ABC):
555
+ """Base class for filesystem collectors."""
556
+
557
+ def __init__(
558
+ self,
559
+ fspath: LEGACY_PATH | None = None,
560
+ path_or_parent: Path | Node | None = None,
561
+ path: Path | None = None,
562
+ name: str | None = None,
563
+ parent: Node | None = None,
564
+ config: Config | None = None,
565
+ session: Session | None = None,
566
+ nodeid: str | None = None,
567
+ ) -> None:
568
+ if path_or_parent:
569
+ if isinstance(path_or_parent, Node):
570
+ assert parent is None
571
+ parent = cast(FSCollector, path_or_parent)
572
+ elif isinstance(path_or_parent, Path):
573
+ assert path is None
574
+ path = path_or_parent
575
+
576
+ path = _imply_path(type(self), path, fspath=fspath)
577
+ if name is None:
578
+ name = path.name
579
+ if parent is not None and parent.path != path:
580
+ try:
581
+ rel = path.relative_to(parent.path)
582
+ except ValueError:
583
+ pass
584
+ else:
585
+ name = str(rel)
586
+ name = name.replace(os.sep, SEP)
587
+ self.path = path
588
+
589
+ if session is None:
590
+ assert parent is not None
591
+ session = parent.session
592
+
593
+ if nodeid is None:
594
+ try:
595
+ nodeid = str(self.path.relative_to(session.config.rootpath))
596
+ except ValueError:
597
+ nodeid = _check_initialpaths_for_relpath(session, path)
598
+
599
+ if nodeid and os.sep != SEP:
600
+ nodeid = nodeid.replace(os.sep, SEP)
601
+
602
+ super().__init__(
603
+ name=name,
604
+ parent=parent,
605
+ config=config,
606
+ session=session,
607
+ nodeid=nodeid,
608
+ path=path,
609
+ )
610
+
611
+ @classmethod
612
+ def from_parent(
613
+ cls,
614
+ parent,
615
+ *,
616
+ fspath: LEGACY_PATH | None = None,
617
+ path: Path | None = None,
618
+ **kw,
619
+ ) -> Self:
620
+ """The public constructor."""
621
+ return super().from_parent(parent=parent, fspath=fspath, path=path, **kw)
622
+
623
+
624
+ class File(FSCollector, abc.ABC):
625
+ """Base class for collecting tests from a file.
626
+
627
+ :ref:`non-python tests`.
628
+ """
629
+
630
+
631
+ class Directory(FSCollector, abc.ABC):
632
+ """Base class for collecting files from a directory.
633
+
634
+ A basic directory collector does the following: goes over the files and
635
+ sub-directories in the directory and creates collectors for them by calling
636
+ the hooks :hook:`pytest_collect_directory` and :hook:`pytest_collect_file`,
637
+ after checking that they are not ignored using
638
+ :hook:`pytest_ignore_collect`.
639
+
640
+ The default directory collectors are :class:`~pytest.Dir` and
641
+ :class:`~pytest.Package`.
642
+
643
+ .. versionadded:: 8.0
644
+
645
+ :ref:`custom directory collectors`.
646
+ """
647
+
648
+
649
+ class Item(Node, abc.ABC):
650
+ """Base class of all test invocation items.
651
+
652
+ Note that for a single function there might be multiple test invocation items.
653
+ """
654
+
655
+ nextitem = None
656
+
657
+ def __init__(
658
+ self,
659
+ name,
660
+ parent=None,
661
+ config: Config | None = None,
662
+ session: Session | None = None,
663
+ nodeid: str | None = None,
664
+ **kw,
665
+ ) -> None:
666
+ # The first two arguments are intentionally passed positionally,
667
+ # to keep plugins who define a node type which inherits from
668
+ # (pytest.Item, pytest.File) working (see issue #8435).
669
+ # They can be made kwargs when the deprecation above is done.
670
+ super().__init__(
671
+ name,
672
+ parent,
673
+ config=config,
674
+ session=session,
675
+ nodeid=nodeid,
676
+ **kw,
677
+ )
678
+ self._report_sections: list[tuple[str, str, str]] = []
679
+
680
+ #: A list of tuples (name, value) that holds user defined properties
681
+ #: for this test.
682
+ self.user_properties: list[tuple[str, object]] = []
683
+
684
+ self._check_item_and_collector_diamond_inheritance()
685
+
686
+ def _check_item_and_collector_diamond_inheritance(self) -> None:
687
+ """
688
+ Check if the current type inherits from both File and Collector
689
+ at the same time, emitting a warning accordingly (#8447).
690
+ """
691
+ cls = type(self)
692
+
693
+ # We inject an attribute in the type to avoid issuing this warning
694
+ # for the same class more than once, which is not helpful.
695
+ # It is a hack, but was deemed acceptable in order to avoid
696
+ # flooding the user in the common case.
697
+ attr_name = "_pytest_diamond_inheritance_warning_shown"
698
+ if getattr(cls, attr_name, False):
699
+ return
700
+ setattr(cls, attr_name, True)
701
+
702
+ problems = ", ".join(
703
+ base.__name__ for base in cls.__bases__ if issubclass(base, Collector)
704
+ )
705
+ if problems:
706
+ warnings.warn(
707
+ f"{cls.__name__} is an Item subclass and should not be a collector, "
708
+ f"however its bases {problems} are collectors.\n"
709
+ "Please split the Collectors and the Item into separate node types.\n"
710
+ "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n"
711
+ "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/",
712
+ PytestWarning,
713
+ )
714
+
715
+ @abc.abstractmethod
716
+ def runtest(self) -> None:
717
+ """Run the test case for this item.
718
+
719
+ Must be implemented by subclasses.
720
+
721
+ .. seealso:: :ref:`non-python tests`
722
+ """
723
+ raise NotImplementedError("runtest must be implemented by Item subclass")
724
+
725
+ def add_report_section(self, when: str, key: str, content: str) -> None:
726
+ """Add a new report section, similar to what's done internally to add
727
+ stdout and stderr captured output::
728
+
729
+ item.add_report_section("call", "stdout", "report section contents")
730
+
731
+ :param str when:
732
+ One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
733
+ :param str key:
734
+ Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
735
+ ``"stderr"`` internally.
736
+ :param str content:
737
+ The full contents as a string.
738
+ """
739
+ if content:
740
+ self._report_sections.append((when, key, content))
741
+
742
+ def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]:
743
+ """Get location information for this item for test reports.
744
+
745
+ Returns a tuple with three elements:
746
+
747
+ - The path of the test (default ``self.path``)
748
+ - The 0-based line number of the test (default ``None``)
749
+ - A name of the test to be shown (default ``""``)
750
+
751
+ .. seealso:: :ref:`non-python tests`
752
+ """
753
+ return self.path, None, ""
754
+
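A sketch of a custom non-Python test item overriding `reportinfo()`; `YamlItem` and `spec_line` are hypothetical:

```python
import pytest

class YamlItem(pytest.Item):
    def __init__(self, *, spec_line: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.spec_line = spec_line

    def runtest(self) -> None:
        pass  # the real checks would go here

    def reportinfo(self):
        # (path, 0-based line number, display name)
        return self.path, self.spec_line, f"usecase: {self.name}"
```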
755
+ @cached_property
756
+ def location(self) -> tuple[str, int | None, str]:
757
+ """
758
+ Returns a tuple of ``(relfspath, lineno, testname)`` for this item
759
+ where ``relfspath`` is file path relative to ``config.rootpath``
760
+ and lineno is a 0-based line number.
761
+ """
762
+ location = self.reportinfo()
763
+ path = absolutepath(location[0])
764
+ relfspath = self.session._node_location_to_relpath(path)
765
+ assert type(location[2]) is str
766
+ return (relfspath, location[1], location[2])
.venv/lib/python3.11/site-packages/_pytest/python.py ADDED
@@ -0,0 +1,1679 @@
1
+ # mypy: allow-untyped-defs
2
+ """Python test discovery, setup and run of test functions."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import abc
7
+ from collections import Counter
8
+ from collections import defaultdict
9
+ import dataclasses
10
+ import enum
11
+ import fnmatch
12
+ from functools import partial
13
+ import inspect
14
+ import itertools
15
+ import os
16
+ from pathlib import Path
17
+ import types
18
+ from typing import Any
19
+ from typing import Callable
20
+ from typing import Dict
21
+ from typing import final
22
+ from typing import Generator
23
+ from typing import Iterable
24
+ from typing import Iterator
25
+ from typing import Literal
26
+ from typing import Mapping
27
+ from typing import Pattern
28
+ from typing import Sequence
29
+ from typing import TYPE_CHECKING
30
+ import warnings
31
+
32
+ import _pytest
33
+ from _pytest import fixtures
34
+ from _pytest import nodes
35
+ from _pytest._code import filter_traceback
36
+ from _pytest._code import getfslineno
37
+ from _pytest._code.code import ExceptionInfo
38
+ from _pytest._code.code import TerminalRepr
39
+ from _pytest._code.code import Traceback
40
+ from _pytest._io.saferepr import saferepr
41
+ from _pytest.compat import ascii_escaped
42
+ from _pytest.compat import get_default_arg_names
43
+ from _pytest.compat import get_real_func
44
+ from _pytest.compat import getimfunc
45
+ from _pytest.compat import is_async_function
46
+ from _pytest.compat import is_generator
47
+ from _pytest.compat import LEGACY_PATH
48
+ from _pytest.compat import NOTSET
49
+ from _pytest.compat import safe_getattr
50
+ from _pytest.compat import safe_isclass
51
+ from _pytest.config import Config
52
+ from _pytest.config import hookimpl
53
+ from _pytest.config.argparsing import Parser
54
+ from _pytest.deprecated import check_ispytest
55
+ from _pytest.fixtures import FixtureDef
56
+ from _pytest.fixtures import FixtureRequest
57
+ from _pytest.fixtures import FuncFixtureInfo
58
+ from _pytest.fixtures import get_scope_node
59
+ from _pytest.main import Session
60
+ from _pytest.mark import MARK_GEN
61
+ from _pytest.mark import ParameterSet
62
+ from _pytest.mark.structures import get_unpacked_marks
63
+ from _pytest.mark.structures import Mark
64
+ from _pytest.mark.structures import MarkDecorator
65
+ from _pytest.mark.structures import normalize_mark_list
66
+ from _pytest.outcomes import fail
67
+ from _pytest.outcomes import skip
68
+ from _pytest.pathlib import fnmatch_ex
69
+ from _pytest.pathlib import import_path
70
+ from _pytest.pathlib import ImportPathMismatchError
71
+ from _pytest.pathlib import scandir
72
+ from _pytest.scope import _ScopeName
73
+ from _pytest.scope import Scope
74
+ from _pytest.stash import StashKey
75
+ from _pytest.warning_types import PytestCollectionWarning
76
+ from _pytest.warning_types import PytestReturnNotNoneWarning
77
+ from _pytest.warning_types import PytestUnhandledCoroutineWarning
78
+
79
+
80
+ if TYPE_CHECKING:
81
+ from typing_extensions import Self
82
+
83
+
84
+ def pytest_addoption(parser: Parser) -> None:
85
+ parser.addini(
86
+ "python_files",
87
+ type="args",
88
+ # NOTE: default is also used in AssertionRewritingHook.
89
+ default=["test_*.py", "*_test.py"],
90
+ help="Glob-style file patterns for Python test module discovery",
91
+ )
92
+ parser.addini(
93
+ "python_classes",
94
+ type="args",
95
+ default=["Test"],
96
+ help="Prefixes or glob names for Python test class discovery",
97
+ )
98
+ parser.addini(
99
+ "python_functions",
100
+ type="args",
101
+ default=["test"],
102
+ help="Prefixes or glob names for Python test function and method discovery",
103
+ )
104
+ parser.addini(
105
+ "disable_test_id_escaping_and_forfeit_all_rights_to_community_support",
106
+ type="bool",
107
+ default=False,
108
+ help="Disable string escape non-ASCII characters, might cause unwanted "
109
+ "side effects(use at your own risk)",
110
+ )
111
+
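The same `Parser.addini()` API is available to plugins and conftest files; a sketch with a hypothetical option name:

```python
# conftest.py
def pytest_addoption(parser):
    parser.addini(
        "acceptance_files",
        type="args",
        default=["accept_*.py"],
        help="Glob patterns for acceptance-test modules",
    )

def pytest_configure(config):
    patterns = config.getini("acceptance_files")  # -> list of strings
    assert isinstance(patterns, list)
```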
112
+
113
+ def pytest_generate_tests(metafunc: Metafunc) -> None:
114
+ for marker in metafunc.definition.iter_markers(name="parametrize"):
115
+ metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker)
116
+
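For reference, a minimal test that the hook above expands into two invocations (argument names and values are illustrative):

```python
import pytest

@pytest.mark.parametrize("arg1, expected", [(1, 2), (2, 4)])
def test_double(arg1, expected):
    assert arg1 * 2 == expected
```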
117
+
118
+ def pytest_configure(config: Config) -> None:
119
+ config.addinivalue_line(
120
+ "markers",
121
+ "parametrize(argnames, argvalues): call a test function multiple "
122
+ "times passing in different arguments in turn. argvalues generally "
123
+ "needs to be a list of values if argnames specifies only one name "
124
+ "or a list of tuples of values if argnames specifies multiple names. "
125
+ "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
126
+ "decorated test function, one with arg1=1 and another with arg1=2."
127
+ "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info "
128
+ "and examples.",
129
+ )
130
+ config.addinivalue_line(
131
+ "markers",
132
+ "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
133
+ "all of the specified fixtures. see "
134
+ "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ",
135
+ )
136
+
137
+
138
+ def async_warn_and_skip(nodeid: str) -> None:
139
+ msg = "async def functions are not natively supported and have been skipped.\n"
140
+ msg += (
141
+ "You need to install a suitable plugin for your async framework, for example:\n"
142
+ )
143
+ msg += " - anyio\n"
144
+ msg += " - pytest-asyncio\n"
145
+ msg += " - pytest-tornasync\n"
146
+ msg += " - pytest-trio\n"
147
+ msg += " - pytest-twisted"
148
+ warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid)))
149
+ skip(reason="async def function and no async plugin installed (see warnings)")
150
+
151
+
152
+ @hookimpl(trylast=True)
153
+ def pytest_pyfunc_call(pyfuncitem: Function) -> object | None:
154
+ testfunction = pyfuncitem.obj
155
+ if is_async_function(testfunction):
156
+ async_warn_and_skip(pyfuncitem.nodeid)
157
+ funcargs = pyfuncitem.funcargs
158
+ testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
159
+ result = testfunction(**testargs)
160
+ if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
161
+ async_warn_and_skip(pyfuncitem.nodeid)
162
+ elif result is not None:
163
+ warnings.warn(
164
+ PytestReturnNotNoneWarning(
165
+ f"Expected None, but {pyfuncitem.nodeid} returned {result!r}, which will be an error in a "
166
+ "future version of pytest. Did you mean to use `assert` instead of `return`?"
167
+ )
168
+ )
169
+ return True
170
+
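Because this is a `firstresult` hook registered `trylast`, a plugin can claim the call before the default runs; a hedged conftest.py sketch (the "twice" marker is hypothetical, and `_fixtureinfo` is private, mirrored from the implementation above):

```python
import pytest

@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem):
    if pyfuncitem.get_closest_marker("twice") is None:
        return None  # fall through to the default implementation above
    testargs = {arg: pyfuncitem.funcargs[arg]
                for arg in pyfuncitem._fixtureinfo.argnames}
    pyfuncitem.obj(**testargs)
    pyfuncitem.obj(**testargs)  # run the test body a second time
    return True  # a non-None result stops the hook chain
```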
171
+
172
+ def pytest_collect_directory(
173
+ path: Path, parent: nodes.Collector
174
+ ) -> nodes.Collector | None:
175
+ pkginit = path / "__init__.py"
176
+ try:
177
+ has_pkginit = pkginit.is_file()
178
+ except PermissionError:
179
+ # See https://github.com/pytest-dev/pytest/issues/12120#issuecomment-2106349096.
180
+ return None
181
+ if has_pkginit:
182
+ return Package.from_parent(parent, path=path)
183
+ return None
184
+
185
+
186
+ def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Module | None:
187
+ if file_path.suffix == ".py":
188
+ if not parent.session.isinitpath(file_path):
189
+ if not path_matches_patterns(
190
+ file_path, parent.config.getini("python_files")
191
+ ):
192
+ return None
193
+ ihook = parent.session.gethookproxy(file_path)
194
+ module: Module = ihook.pytest_pycollect_makemodule(
195
+ module_path=file_path, parent=parent
196
+ )
197
+ return module
198
+ return None
199
+
200
+
201
+ def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool:
202
+ """Return whether path matches any of the patterns in the list of globs given."""
203
+ return any(fnmatch_ex(pattern, path) for pattern in patterns)
204
+
205
+
206
+ def pytest_pycollect_makemodule(module_path: Path, parent) -> Module:
207
+ return Module.from_parent(parent, path=module_path)
208
+
209
+
210
+ @hookimpl(trylast=True)
211
+ def pytest_pycollect_makeitem(
212
+ collector: Module | Class, name: str, obj: object
213
+ ) -> None | nodes.Item | nodes.Collector | list[nodes.Item | nodes.Collector]:
214
+ assert isinstance(collector, (Class, Module)), type(collector)
215
+ # Nothing was collected elsewhere, let's do it here.
216
+ if safe_isclass(obj):
217
+ if collector.istestclass(obj, name):
218
+ return Class.from_parent(collector, name=name, obj=obj)
219
+ elif collector.istestfunction(obj, name):
220
+ # mock seems to store unbound methods (issue473), normalize it.
221
+ obj = getattr(obj, "__func__", obj)
222
+ # We need to try and unwrap the function if it's a functools.partial
223
+ # or a functools.wrapped.
224
+ # We mustn't if it's been wrapped with mock.patch (python 2 only).
225
+ if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))):
226
+ filename, lineno = getfslineno(obj)
227
+ warnings.warn_explicit(
228
+ message=PytestCollectionWarning(
229
+ f"cannot collect {name!r} because it is not a function."
230
+ ),
231
+ category=None,
232
+ filename=str(filename),
233
+ lineno=lineno + 1,
234
+ )
235
+ elif getattr(obj, "__test__", True):
236
+ if is_generator(obj):
237
+ res = Function.from_parent(collector, name=name)
238
+ reason = (
239
+ f"yield tests were removed in pytest 4.0 - {name} will be ignored"
240
+ )
241
+ res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
242
+ res.warn(PytestCollectionWarning(reason))
243
+ return res
244
+ else:
245
+ return list(collector._genfunctions(name, obj))
246
+ return None
247
+
248
+
249
+ class PyobjMixin(nodes.Node):
250
+ """this mix-in inherits from Node to carry over the typing information
251
+
252
+ as its intended to always mix in before a node
253
+ its position in the mro is unaffected"""
254
+
255
+ _ALLOW_MARKERS = True
256
+
257
+ @property
258
+ def module(self):
259
+ """Python module object this node was collected from (can be None)."""
260
+ node = self.getparent(Module)
261
+ return node.obj if node is not None else None
262
+
263
+ @property
264
+ def cls(self):
265
+ """Python class object this node was collected from (can be None)."""
266
+ node = self.getparent(Class)
267
+ return node.obj if node is not None else None
268
+
269
+ @property
270
+ def instance(self):
271
+ """Python instance object the function is bound to.
272
+
273
+ Returns None if not a test method, e.g. for a standalone test function,
274
+ a class or a module.
275
+ """
276
+ # Overridden by Function.
277
+ return None
278
+
279
+ @property
280
+ def obj(self):
281
+ """Underlying Python object."""
282
+ obj = getattr(self, "_obj", None)
283
+ if obj is None:
284
+ self._obj = obj = self._getobj()
285
+ # XXX evil hack
286
+ # used to avoid Function marker duplication
287
+ if self._ALLOW_MARKERS:
288
+ self.own_markers.extend(get_unpacked_marks(self.obj))
289
+ # This assumes that `obj` is called before there is a chance
290
+ # to add custom keys to `self.keywords`, so no fear of overriding.
291
+ self.keywords.update((mark.name, mark) for mark in self.own_markers)
292
+ return obj
293
+
294
+ @obj.setter
295
+ def obj(self, value):
296
+ self._obj = value
297
+
298
+ def _getobj(self):
299
+ """Get the underlying Python object. May be overwritten by subclasses."""
300
+ # TODO: Improve the type of `parent` such that assert/ignore aren't needed.
301
+ assert self.parent is not None
302
+ obj = self.parent.obj # type: ignore[attr-defined]
303
+ return getattr(obj, self.name)
304
+
305
+ def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str:
306
+ """Return Python path relative to the containing module."""
307
+ parts = []
308
+ for node in self.iter_parents():
309
+ name = node.name
310
+ if isinstance(node, Module):
311
+ name = os.path.splitext(name)[0]
312
+ if stopatmodule:
313
+ if includemodule:
314
+ parts.append(name)
315
+ break
316
+ parts.append(name)
317
+ parts.reverse()
318
+ return ".".join(parts)
319
+
320
+ def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]:
321
+ # XXX caching?
322
+ path, lineno = getfslineno(self.obj)
323
+ modpath = self.getmodpath()
324
+ return path, lineno, modpath
325
+
326
+
327
+ # As an optimization, these builtin attribute names are pre-ignored when
328
+ # iterating over an object during collection -- the pytest_pycollect_makeitem
329
+ # hook is not called for them.
330
+ # fmt: off
331
+ class _EmptyClass: pass # noqa: E701
332
+ IGNORED_ATTRIBUTES = frozenset.union(
333
+ frozenset(),
334
+ # Module.
335
+ dir(types.ModuleType("empty_module")),
336
+ # Some extra module attributes the above doesn't catch.
337
+ {"__builtins__", "__file__", "__cached__"},
338
+ # Class.
339
+ dir(_EmptyClass),
340
+ # Instance.
341
+ dir(_EmptyClass()),
342
+ )
343
+ del _EmptyClass
344
+ # fmt: on
345
+
346
+
347
+ class PyCollector(PyobjMixin, nodes.Collector, abc.ABC):
348
+ def funcnamefilter(self, name: str) -> bool:
349
+ return self._matches_prefix_or_glob_option("python_functions", name)
350
+
351
+ def isnosetest(self, obj: object) -> bool:
352
+ """Look for the __test__ attribute, which is applied by the
353
+ @nose.tools.istest decorator.
354
+ """
355
+ # We explicitly check for "is True" here to not mistakenly treat
356
+ # classes with a custom __getattr__ returning something truthy (like a
357
+ # function) as test classes.
358
+ return safe_getattr(obj, "__test__", False) is True
359
+
360
+ def classnamefilter(self, name: str) -> bool:
361
+ return self._matches_prefix_or_glob_option("python_classes", name)
362
+
363
+ def istestfunction(self, obj: object, name: str) -> bool:
364
+ if self.funcnamefilter(name) or self.isnosetest(obj):
365
+ if isinstance(obj, (staticmethod, classmethod)):
366
+ # staticmethods and classmethods need to be unwrapped.
367
+ obj = safe_getattr(obj, "__func__", False)
368
+ return callable(obj) and fixtures.getfixturemarker(obj) is None
369
+ else:
370
+ return False
371
+
372
+ def istestclass(self, obj: object, name: str) -> bool:
373
+ if not (self.classnamefilter(name) or self.isnosetest(obj)):
374
+ return False
375
+ if inspect.isabstract(obj):
376
+ return False
377
+ return True
378
+
379
+ def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool:
380
+ """Check if the given name matches the prefix or glob-pattern defined
381
+ in ini configuration."""
382
+ for option in self.config.getini(option_name):
383
+ if name.startswith(option):
384
+ return True
385
+ # Check that name looks like a glob-string before calling fnmatch
386
+ # because this is called for every name in each collected module,
387
+ # and fnmatch is somewhat expensive to call.
388
+ elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
389
+ name, option
390
+ ):
391
+ return True
392
+ return False
393
+
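+ # For reference, the ini options consumed above take plain prefixes or globs
+ # (a value counts as a glob only if it contains "*", "?" or "["), e.g.:
+ #
+ #   [pytest]
+ #   python_files = test_*.py check_*.py
+ #   python_classes = Test Check*
+ #   python_functions = test_* check_*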
394
+ def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
395
+ if not getattr(self.obj, "__test__", True):
396
+ return []
397
+
398
+ # Avoid random getattrs and peek in the __dict__ instead.
399
+ dicts = [getattr(self.obj, "__dict__", {})]
400
+ if isinstance(self.obj, type):
401
+ for basecls in self.obj.__mro__:
402
+ dicts.append(basecls.__dict__)
403
+
404
+ # In each class, nodes should be definition ordered.
405
+ # __dict__ is definition ordered.
406
+ seen: set[str] = set()
407
+ dict_values: list[list[nodes.Item | nodes.Collector]] = []
408
+ ihook = self.ihook
409
+ for dic in dicts:
410
+ values: list[nodes.Item | nodes.Collector] = []
411
+ # Note: seems like the dict can change during iteration -
412
+ # be careful not to remove the list() without consideration.
413
+ for name, obj in list(dic.items()):
414
+ if name in IGNORED_ATTRIBUTES:
415
+ continue
416
+ if name in seen:
417
+ continue
418
+ seen.add(name)
419
+ res = ihook.pytest_pycollect_makeitem(
420
+ collector=self, name=name, obj=obj
421
+ )
422
+ if res is None:
423
+ continue
424
+ elif isinstance(res, list):
425
+ values.extend(res)
426
+ else:
427
+ values.append(res)
428
+ dict_values.append(values)
429
+
430
+ # Between classes in the class hierarchy, reverse-MRO order -- nodes
431
+ # inherited from base classes should come before subclasses.
432
+ result = []
433
+ for values in reversed(dict_values):
434
+ result.extend(values)
435
+ return result
436
+
437
+ def _genfunctions(self, name: str, funcobj) -> Iterator[Function]:
438
+ modulecol = self.getparent(Module)
439
+ assert modulecol is not None
440
+ module = modulecol.obj
441
+ clscol = self.getparent(Class)
442
+ cls = clscol and clscol.obj or None
443
+
444
+ definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj)
445
+ fixtureinfo = definition._fixtureinfo
446
+
447
+ # pytest_generate_tests impls call metafunc.parametrize() which fills
448
+ # metafunc._calls, the outcome of the hook.
449
+ metafunc = Metafunc(
450
+ definition=definition,
451
+ fixtureinfo=fixtureinfo,
452
+ config=self.config,
453
+ cls=cls,
454
+ module=module,
455
+ _ispytest=True,
456
+ )
457
+ methods = []
458
+ if hasattr(module, "pytest_generate_tests"):
459
+ methods.append(module.pytest_generate_tests)
460
+ if cls is not None and hasattr(cls, "pytest_generate_tests"):
461
+ methods.append(cls().pytest_generate_tests)
462
+ self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))
463
+
464
+ if not metafunc._calls:
465
+ yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
466
+ else:
467
+ # Direct parametrizations taking place in module/class-specific
468
+ # `metafunc.parametrize` calls may have shadowed some fixtures, so make sure
469
+ # we update what the function really needs a.k.a its fixture closure. Note that
470
+ # direct parametrizations using `@pytest.mark.parametrize` have already been considered
471
+ # into making the closure using `ignore_args` arg to `getfixtureclosure`.
472
+ fixtureinfo.prune_dependency_tree()
473
+
474
+ for callspec in metafunc._calls:
475
+ subname = f"{name}[{callspec.id}]"
476
+ yield Function.from_parent(
477
+ self,
478
+ name=subname,
479
+ callspec=callspec,
480
+ fixtureinfo=fixtureinfo,
481
+ keywords={callspec.id: True},
482
+ originalname=name,
483
+ )
484
+
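+ # Hedged example of the module-level hook gathered above: defining
+ # pytest_generate_tests next to the tests parametrizes them at collection
+ # time (names are illustrative):
+ #
+ #   def pytest_generate_tests(metafunc):
+ #       if "db_backend" in metafunc.fixturenames:
+ #           metafunc.parametrize("db_backend", ["sqlite", "postgres"])
+ #
+ #   def test_query(db_backend): ...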
485
+
486
+ def importtestmodule(
487
+ path: Path,
488
+ config: Config,
489
+ ):
490
+ # We assume we are only called once per module.
491
+ importmode = config.getoption("--import-mode")
492
+ try:
493
+ mod = import_path(
494
+ path,
495
+ mode=importmode,
496
+ root=config.rootpath,
497
+ consider_namespace_packages=config.getini("consider_namespace_packages"),
498
+ )
499
+ except SyntaxError as e:
500
+ raise nodes.Collector.CollectError(
501
+ ExceptionInfo.from_current().getrepr(style="short")
502
+ ) from e
503
+ except ImportPathMismatchError as e:
504
+ raise nodes.Collector.CollectError(
505
+ "import file mismatch:\n"
506
+ "imported module {!r} has this __file__ attribute:\n"
507
+ " {}\n"
508
+ "which is not the same as the test file we want to collect:\n"
509
+ " {}\n"
510
+ "HINT: remove __pycache__ / .pyc files and/or use a "
511
+ "unique basename for your test file modules".format(*e.args)
512
+ ) from e
513
+ except ImportError as e:
514
+ exc_info = ExceptionInfo.from_current()
515
+ if config.get_verbosity() < 2:
516
+ exc_info.traceback = exc_info.traceback.filter(filter_traceback)
517
+ exc_repr = (
518
+ exc_info.getrepr(style="short")
519
+ if exc_info.traceback
520
+ else exc_info.exconly()
521
+ )
522
+ formatted_tb = str(exc_repr)
523
+ raise nodes.Collector.CollectError(
524
+ f"ImportError while importing test module '{path}'.\n"
525
+ "Hint: make sure your test modules/packages have valid Python names.\n"
526
+ "Traceback:\n"
527
+ f"{formatted_tb}"
528
+ ) from e
529
+ except skip.Exception as e:
530
+ if e.allow_module_level:
531
+ raise
532
+ raise nodes.Collector.CollectError(
533
+ "Using pytest.skip outside of a test will skip the entire module. "
534
+ "If that's your intention, pass `allow_module_level=True`. "
535
+ "If you want to skip a specific test or an entire class, "
536
+ "use the @pytest.mark.skip or @pytest.mark.skipif decorators."
537
+ ) from e
538
+ config.pluginmanager.consider_module(mod)
539
+ return mod
540
+
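+ # Hedged example of the module-level skip handled above: a test module may
+ # bail out during import, but only by opting in explicitly:
+ #
+ #   import sys
+ #   import pytest
+ #
+ #   if sys.platform != "linux":
+ #       pytest.skip("linux-only tests", allow_module_level=True)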
541
+
542
+ class Module(nodes.File, PyCollector):
543
+ """Collector for test classes and functions in a Python module."""
544
+
545
+ def _getobj(self):
546
+ return importtestmodule(self.path, self.config)
547
+
548
+ def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
549
+ self._register_setup_module_fixture()
550
+ self._register_setup_function_fixture()
551
+ self.session._fixturemanager.parsefactories(self)
552
+ return super().collect()
553
+
554
+ def _register_setup_module_fixture(self) -> None:
555
+ """Register an autouse, module-scoped fixture for the collected module object
556
+ that invokes setUpModule/tearDownModule if either or both are available.
557
+
558
+ Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
559
+ other fixtures (#517).
560
+ """
561
+ setup_module = _get_first_non_fixture_func(
562
+ self.obj, ("setUpModule", "setup_module")
563
+ )
564
+ teardown_module = _get_first_non_fixture_func(
565
+ self.obj, ("tearDownModule", "teardown_module")
566
+ )
567
+
568
+ if setup_module is None and teardown_module is None:
569
+ return
570
+
571
+ def xunit_setup_module_fixture(request) -> Generator[None]:
572
+ module = request.module
573
+ if setup_module is not None:
574
+ _call_with_optional_argument(setup_module, module)
575
+ yield
576
+ if teardown_module is not None:
577
+ _call_with_optional_argument(teardown_module, module)
578
+
579
+ self.session._fixturemanager._register_fixture(
580
+ # Use a unique name to speed up lookup.
581
+ name=f"_xunit_setup_module_fixture_{self.obj.__name__}",
582
+ func=xunit_setup_module_fixture,
583
+ nodeid=self.nodeid,
584
+ scope="module",
585
+ autouse=True,
586
+ )
587
+
588
+ def _register_setup_function_fixture(self) -> None:
589
+ """Register an autouse, function-scoped fixture for the collected module object
590
+ that invokes setup_function/teardown_function if either or both are available.
591
+
592
+ Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
593
+ other fixtures (#517).
594
+ """
595
+ setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
596
+ teardown_function = _get_first_non_fixture_func(
597
+ self.obj, ("teardown_function",)
598
+ )
599
+ if setup_function is None and teardown_function is None:
600
+ return
601
+
602
+ def xunit_setup_function_fixture(request) -> Generator[None]:
603
+ if request.instance is not None:
604
+ # in this case we are bound to an instance, so we need to let
605
+ # setup_method handle this
606
+ yield
607
+ return
608
+ function = request.function
609
+ if setup_function is not None:
610
+ _call_with_optional_argument(setup_function, function)
611
+ yield
612
+ if teardown_function is not None:
613
+ _call_with_optional_argument(teardown_function, function)
614
+
615
+ self.session._fixturemanager._register_fixture(
616
+ # Use a unique name to speed up lookup.
617
+ name=f"_xunit_setup_function_fixture_{self.obj.__name__}",
618
+ func=xunit_setup_function_fixture,
619
+ nodeid=self.nodeid,
620
+ scope="function",
621
+ autouse=True,
622
+ )
623
+
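+ # Hedged illustration of the xunit-style module hooks registered above
+ # (`connect` is a placeholder):
+ #
+ #   _conn = None
+ #
+ #   def setup_module(module):    # the `module` argument is optional
+ #       global _conn
+ #       _conn = connect()
+ #
+ #   def teardown_module():
+ #       _conn.close()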
624
+
625
+ class Package(nodes.Directory):
626
+ """Collector for files and directories in a Python packages -- directories
627
+ with an `__init__.py` file.
628
+
629
+ .. note::
630
+
631
+ Directories without an `__init__.py` file are instead collected by
632
+ :class:`~pytest.Dir` by default. Both are :class:`~pytest.Directory`
633
+ collectors.
634
+
635
+ .. versionchanged:: 8.0
636
+
637
+ Now inherits from :class:`~pytest.Directory`.
638
+ """
639
+
640
+ def __init__(
641
+ self,
642
+ fspath: LEGACY_PATH | None,
643
+ parent: nodes.Collector,
644
+ # NOTE: following args are unused:
645
+ config=None,
646
+ session=None,
647
+ nodeid=None,
648
+ path: Path | None = None,
649
+ ) -> None:
650
+ # NOTE: Could be just the following, but kept as-is for compat.
651
+ # super().__init__(self, fspath, parent=parent)
652
+ session = parent.session
653
+ super().__init__(
654
+ fspath=fspath,
655
+ path=path,
656
+ parent=parent,
657
+ config=config,
658
+ session=session,
659
+ nodeid=nodeid,
660
+ )
661
+
662
+ def setup(self) -> None:
663
+ init_mod = importtestmodule(self.path / "__init__.py", self.config)
664
+
665
+ # Not using fixtures to call setup_module here because autouse fixtures
666
+ # from packages are not called automatically (#4085).
667
+ setup_module = _get_first_non_fixture_func(
668
+ init_mod, ("setUpModule", "setup_module")
669
+ )
670
+ if setup_module is not None:
671
+ _call_with_optional_argument(setup_module, init_mod)
672
+
673
+ teardown_module = _get_first_non_fixture_func(
674
+ init_mod, ("tearDownModule", "teardown_module")
675
+ )
676
+ if teardown_module is not None:
677
+ func = partial(_call_with_optional_argument, teardown_module, init_mod)
678
+ self.addfinalizer(func)
679
+
680
+ def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
681
+ # Always collect __init__.py first.
682
+ def sort_key(entry: os.DirEntry[str]) -> object:
683
+ return (entry.name != "__init__.py", entry.name)
684
+
685
+ config = self.config
686
+ col: nodes.Collector | None
687
+ cols: Sequence[nodes.Collector]
688
+ ihook = self.ihook
689
+ for direntry in scandir(self.path, sort_key):
690
+ if direntry.is_dir():
691
+ path = Path(direntry.path)
692
+ if not self.session.isinitpath(path, with_parents=True):
693
+ if ihook.pytest_ignore_collect(collection_path=path, config=config):
694
+ continue
695
+ col = ihook.pytest_collect_directory(path=path, parent=self)
696
+ if col is not None:
697
+ yield col
698
+
699
+ elif direntry.is_file():
700
+ path = Path(direntry.path)
701
+ if not self.session.isinitpath(path):
702
+ if ihook.pytest_ignore_collect(collection_path=path, config=config):
703
+ continue
704
+ cols = ihook.pytest_collect_file(file_path=path, parent=self)
705
+ yield from cols
706
+
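+ # For reference, a directory becomes a Package (rather than a plain Dir)
+ # only when it contains an __init__.py, which the sort key above collects
+ # first:
+ #
+ #   tests/
+ #       __init__.py       <- makes `tests` a Package; collected first
+ #       test_api.py
+ #       sub/
+ #           test_more.py  <- collected via a child directory collector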
707
+
708
+ def _call_with_optional_argument(func, arg) -> None:
709
+ """Call the given function with the given argument if func accepts one argument, otherwise
710
+ call func without arguments."""
711
+ arg_count = func.__code__.co_argcount
712
+ if inspect.ismethod(func):
713
+ arg_count -= 1
714
+ if arg_count:
715
+ func(arg)
716
+ else:
717
+ func()
718
+
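+ # Hedged demo of the calling convention above: zero- and one-argument xunit
+ # hooks are both accepted, so each of these is invoked correctly:
+ #
+ #   def setup_function():           # called as setup_function()
+ #       ...
+ #
+ #   def setup_function(function):   # called as setup_function(test_func)
+ #       ...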
719
+
720
+ def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> object | None:
721
+ """Return the attribute from the given object to be used as a setup/teardown
722
+ xunit-style function, but only if not marked as a fixture to avoid calling it twice.
723
+ """
724
+ for name in names:
725
+ meth: object | None = getattr(obj, name, None)
726
+ if meth is not None and fixtures.getfixturemarker(meth) is None:
727
+ return meth
728
+ return None
729
+
730
+
731
+ class Class(PyCollector):
732
+ """Collector for test methods (and nested classes) in a Python class."""
733
+
734
+ @classmethod
735
+ def from_parent(cls, parent, *, name, obj=None, **kw) -> Self: # type: ignore[override]
736
+ """The public constructor."""
737
+ return super().from_parent(name=name, parent=parent, **kw)
738
+
739
+ def newinstance(self):
740
+ return self.obj()
741
+
742
+ def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
743
+ if not safe_getattr(self.obj, "__test__", True):
744
+ return []
745
+ if hasinit(self.obj):
746
+ assert self.parent is not None
747
+ self.warn(
748
+ PytestCollectionWarning(
749
+ f"cannot collect test class {self.obj.__name__!r} because it has a "
750
+ f"__init__ constructor (from: {self.parent.nodeid})"
751
+ )
752
+ )
753
+ return []
754
+ elif hasnew(self.obj):
755
+ assert self.parent is not None
756
+ self.warn(
757
+ PytestCollectionWarning(
758
+ f"cannot collect test class {self.obj.__name__!r} because it has a "
759
+ f"__new__ constructor (from: {self.parent.nodeid})"
760
+ )
761
+ )
762
+ return []
763
+
764
+ self._register_setup_class_fixture()
765
+ self._register_setup_method_fixture()
766
+
767
+ self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
768
+
769
+ return super().collect()
770
+
771
+ def _register_setup_class_fixture(self) -> None:
772
+ """Register an autouse, class scoped fixture into the collected class object
773
+ that invokes setup_class/teardown_class if either or both are available.
774
+
775
+ Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
776
+ other fixtures (#517).
777
+ """
778
+ setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
779
+ teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",))
780
+ if setup_class is None and teardown_class is None:
781
+ return
782
+
783
+ def xunit_setup_class_fixture(request) -> Generator[None]:
784
+ cls = request.cls
785
+ if setup_class is not None:
786
+ func = getimfunc(setup_class)
787
+ _call_with_optional_argument(func, cls)
788
+ yield
789
+ if teardown_class is not None:
790
+ func = getimfunc(teardown_class)
791
+ _call_with_optional_argument(func, cls)
792
+
793
+ self.session._fixturemanager._register_fixture(
794
+ # Use a unique name to speed up lookup.
795
+ name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}",
796
+ func=xunit_setup_class_fixture,
797
+ nodeid=self.nodeid,
798
+ scope="class",
799
+ autouse=True,
800
+ )
801
+
802
+ def _register_setup_method_fixture(self) -> None:
803
+ """Register an autouse, function scoped fixture into the collected class object
804
+ that invokes setup_method/teardown_method if either or both are available.
805
+
806
+ Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
807
+ other fixtures (#517).
808
+ """
809
+ setup_name = "setup_method"
810
+ setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
811
+ teardown_name = "teardown_method"
812
+ teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
813
+ if setup_method is None and teardown_method is None:
814
+ return
815
+
816
+ def xunit_setup_method_fixture(request) -> Generator[None]:
817
+ instance = request.instance
818
+ method = request.function
819
+ if setup_method is not None:
820
+ func = getattr(instance, setup_name)
821
+ _call_with_optional_argument(func, method)
822
+ yield
823
+ if teardown_method is not None:
824
+ func = getattr(instance, teardown_name)
825
+ _call_with_optional_argument(func, method)
826
+
827
+ self.session._fixturemanager._register_fixture(
828
+ # Use a unique name to speed up lookup.
829
+ name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}",
830
+ func=xunit_setup_method_fixture,
831
+ nodeid=self.nodeid,
832
+ scope="function",
833
+ autouse=True,
834
+ )
835
+
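+ # Hedged example of the constructor checks in Class.collect above: a test
+ # class defining __init__ or __new__ is warned about and not collected:
+ #
+ #   class TestUser:          # collected
+ #       def test_name(self): ...
+ #
+ #   class TestAccount:       # PytestCollectionWarning; NOT collected
+ #       def __init__(self):
+ #           self.balance = 0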
836
+
837
+ def hasinit(obj: object) -> bool:
838
+ init: object = getattr(obj, "__init__", None)
839
+ if init:
840
+ return init != object.__init__
841
+ return False
842
+
843
+
844
+ def hasnew(obj: object) -> bool:
845
+ new: object = getattr(obj, "__new__", None)
846
+ if new:
847
+ return new != object.__new__
848
+ return False
849
+
850
+
851
+ @final
852
+ @dataclasses.dataclass(frozen=True)
853
+ class IdMaker:
854
+ """Make IDs for a parametrization."""
855
+
856
+ __slots__ = (
857
+ "argnames",
858
+ "parametersets",
859
+ "idfn",
860
+ "ids",
861
+ "config",
862
+ "nodeid",
863
+ "func_name",
864
+ )
865
+
866
+ # The argnames of the parametrization.
867
+ argnames: Sequence[str]
868
+ # The ParameterSets of the parametrization.
869
+ parametersets: Sequence[ParameterSet]
870
+ # Optionally, a user-provided callable to make IDs for parameters in a
871
+ # ParameterSet.
872
+ idfn: Callable[[Any], object | None] | None
873
+ # Optionally, explicit IDs for ParameterSets by index.
874
+ ids: Sequence[object | None] | None
875
+ # Optionally, the pytest config.
876
+ # Used for controlling ASCII escaping, and for calling the
877
+ # :hook:`pytest_make_parametrize_id` hook.
878
+ config: Config | None
879
+ # Optionally, the ID of the node being parametrized.
880
+ # Used only for clearer error messages.
881
+ nodeid: str | None
882
+ # Optionally, the ID of the function being parametrized.
883
+ # Used only for clearer error messages.
884
+ func_name: str | None
885
+
886
+ def make_unique_parameterset_ids(self) -> list[str]:
887
+ """Make a unique identifier for each ParameterSet, that may be used to
888
+ identify the parametrization in a node ID.
889
+
890
+ Format is <prm_1_token>-...-<prm_n_token>[counter], where prm_x_token is
891
+ - user-provided id, if given
892
+ - else an id derived from the value, applicable for certain types
893
+ - else <argname><parameterset index>
894
+ The counter suffix is appended only in case a string wouldn't be unique
895
+ otherwise.
896
+ """
897
+ resolved_ids = list(self._resolve_ids())
898
+ # All IDs must be unique!
899
+ if len(resolved_ids) != len(set(resolved_ids)):
900
+ # Record the number of occurrences of each ID.
901
+ id_counts = Counter(resolved_ids)
902
+ # Map the ID to its next suffix.
903
+ id_suffixes: dict[str, int] = defaultdict(int)
904
+ # Suffix non-unique IDs to make them unique.
905
+ for index, id in enumerate(resolved_ids):
906
+ if id_counts[id] > 1:
907
+ suffix = ""
908
+ if id and id[-1].isdigit():
909
+ suffix = "_"
910
+ new_id = f"{id}{suffix}{id_suffixes[id]}"
911
+ while new_id in set(resolved_ids):
912
+ id_suffixes[id] += 1
913
+ new_id = f"{id}{suffix}{id_suffixes[id]}"
914
+ resolved_ids[index] = new_id
915
+ id_suffixes[id] += 1
916
+ assert len(resolved_ids) == len(
917
+ set(resolved_ids)
918
+ ), f"Internal error: {resolved_ids=}"
919
+ return resolved_ids
920
+
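+ # Hedged illustration of the suffixing above:
+ #
+ #   @pytest.mark.parametrize("s", ["a", "a"])
+ #   def test_s(s): ...
+ #
+ # yields node IDs test_s[a0] and test_s[a1]; if the duplicate ID already
+ # ended in a digit (e.g. "1.0"), an extra "_" separator would be inserted.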
921
+ def _resolve_ids(self) -> Iterable[str]:
922
+ """Resolve IDs for all ParameterSets (may contain duplicates)."""
923
+ for idx, parameterset in enumerate(self.parametersets):
924
+ if parameterset.id is not None:
925
+ # ID provided directly - pytest.param(..., id="...")
926
+ yield parameterset.id
927
+ elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
928
+ # ID provided in the IDs list - parametrize(..., ids=[...]).
929
+ yield self._idval_from_value_required(self.ids[idx], idx)
930
+ else:
931
+ # ID not provided - generate it.
932
+ yield "-".join(
933
+ self._idval(val, argname, idx)
934
+ for val, argname in zip(parameterset.values, self.argnames)
935
+ )
936
+
937
+ def _idval(self, val: object, argname: str, idx: int) -> str:
938
+ """Make an ID for a parameter in a ParameterSet."""
939
+ idval = self._idval_from_function(val, argname, idx)
940
+ if idval is not None:
941
+ return idval
942
+ idval = self._idval_from_hook(val, argname)
943
+ if idval is not None:
944
+ return idval
945
+ idval = self._idval_from_value(val)
946
+ if idval is not None:
947
+ return idval
948
+ return self._idval_from_argname(argname, idx)
949
+
950
+ def _idval_from_function(self, val: object, argname: str, idx: int) -> str | None:
951
+ """Try to make an ID for a parameter in a ParameterSet using the
952
+ user-provided id callable, if given."""
953
+ if self.idfn is None:
954
+ return None
955
+ try:
956
+ id = self.idfn(val)
957
+ except Exception as e:
958
+ prefix = f"{self.nodeid}: " if self.nodeid is not None else ""
959
+ msg = "error raised while trying to determine id of parameter '{}' at position {}"
960
+ msg = prefix + msg.format(argname, idx)
961
+ raise ValueError(msg) from e
962
+ if id is None:
963
+ return None
964
+ return self._idval_from_value(id)
965
+
966
+ def _idval_from_hook(self, val: object, argname: str) -> str | None:
967
+ """Try to make an ID for a parameter in a ParameterSet by calling the
968
+ :hook:`pytest_make_parametrize_id` hook."""
969
+ if self.config:
970
+ id: str | None = self.config.hook.pytest_make_parametrize_id(
971
+ config=self.config, val=val, argname=argname
972
+ )
973
+ return id
974
+ return None
975
+
976
+ def _idval_from_value(self, val: object) -> str | None:
977
+ """Try to make an ID for a parameter in a ParameterSet from its value,
978
+ if the value type is supported."""
979
+ if isinstance(val, (str, bytes)):
980
+ return _ascii_escaped_by_config(val, self.config)
981
+ elif val is None or isinstance(val, (float, int, bool, complex)):
982
+ return str(val)
983
+ elif isinstance(val, Pattern):
984
+ return ascii_escaped(val.pattern)
985
+ elif val is NOTSET:
986
+ # Fallback to default. Note that NOTSET is an enum.Enum.
987
+ pass
988
+ elif isinstance(val, enum.Enum):
989
+ return str(val)
990
+ elif isinstance(getattr(val, "__name__", None), str):
991
+ # Name of a class, function, module, etc.
992
+ name: str = getattr(val, "__name__")
993
+ return name
994
+ return None
995
+
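+ # For reference, the fallbacks above turn these argvalues into the IDs shown
+ # (unsupported values fall back to <argname><index>):
+ #
+ #   @pytest.mark.parametrize("v", ["txt", 3, 2.5, None, str])
+ #   def test_v(v): ...
+ #
+ #   # -> test_v[txt], test_v[3], test_v[2.5], test_v[None], test_v[str]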
996
+ def _idval_from_value_required(self, val: object, idx: int) -> str:
997
+ """Like _idval_from_value(), but fails if the type is not supported."""
998
+ id = self._idval_from_value(val)
999
+ if id is not None:
1000
+ return id
1001
+
1002
+ # Fail.
1003
+ if self.func_name is not None:
1004
+ prefix = f"In {self.func_name}: "
1005
+ elif self.nodeid is not None:
1006
+ prefix = f"In {self.nodeid}: "
1007
+ else:
1008
+ prefix = ""
1009
+ msg = (
1010
+ f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. "
1011
+ "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__."
1012
+ )
1013
+ fail(msg, pytrace=False)
1014
+
1015
+ @staticmethod
1016
+ def _idval_from_argname(argname: str, idx: int) -> str:
1017
+ """Make an ID for a parameter in a ParameterSet from the argument name
1018
+ and the index of the ParameterSet."""
1019
+ return str(argname) + str(idx)
1020
+
1021
+
1022
+ @final
1023
+ @dataclasses.dataclass(frozen=True)
1024
+ class CallSpec2:
1025
+ """A planned parameterized invocation of a test function.
1026
+
1027
+ Calculated during collection for a given test function's Metafunc.
1028
+ Once collection is over, each callspec is turned into a single Item
1029
+ and stored in item.callspec.
1030
+ """
1031
+
1032
+ # arg name -> arg value which will be passed to a fixture or pseudo-fixture
1033
+ # of the same name. (indirect or direct parametrization respectively)
1034
+ params: dict[str, object] = dataclasses.field(default_factory=dict)
1035
+ # arg name -> arg index.
1036
+ indices: dict[str, int] = dataclasses.field(default_factory=dict)
1037
+ # Used for sorting parametrized resources.
1038
+ _arg2scope: Mapping[str, Scope] = dataclasses.field(default_factory=dict)
1039
+ # Parts which will be added to the item's name in `[..]` separated by "-".
1040
+ _idlist: Sequence[str] = dataclasses.field(default_factory=tuple)
1041
+ # Marks which will be applied to the item.
1042
+ marks: list[Mark] = dataclasses.field(default_factory=list)
1043
+
1044
+ def setmulti(
1045
+ self,
1046
+ *,
1047
+ argnames: Iterable[str],
1048
+ valset: Iterable[object],
1049
+ id: str,
1050
+ marks: Iterable[Mark | MarkDecorator],
1051
+ scope: Scope,
1052
+ param_index: int,
1053
+ ) -> CallSpec2:
1054
+ params = self.params.copy()
1055
+ indices = self.indices.copy()
1056
+ arg2scope = dict(self._arg2scope)
1057
+ for arg, val in zip(argnames, valset):
1058
+ if arg in params:
1059
+ raise ValueError(f"duplicate parametrization of {arg!r}")
1060
+ params[arg] = val
1061
+ indices[arg] = param_index
1062
+ arg2scope[arg] = scope
1063
+ return CallSpec2(
1064
+ params=params,
1065
+ indices=indices,
1066
+ _arg2scope=arg2scope,
1067
+ _idlist=[*self._idlist, id],
1068
+ marks=[*self.marks, *normalize_mark_list(marks)],
1069
+ )
1070
+
1071
+ def getparam(self, name: str) -> object:
1072
+ try:
1073
+ return self.params[name]
1074
+ except KeyError as e:
1075
+ raise ValueError(name) from e
1076
+
1077
+ @property
1078
+ def id(self) -> str:
1079
+ return "-".join(self._idlist)
1080
+
1081
+
1082
+ def get_direct_param_fixture_func(request: FixtureRequest) -> Any:
1083
+ return request.param
1084
+
1085
+
1086
+ # Used for storing pseudo fixturedefs for direct parametrization.
1087
+ name2pseudofixturedef_key = StashKey[Dict[str, FixtureDef[Any]]]()
1088
+
1089
+
1090
+ @final
1091
+ class Metafunc:
1092
+ """Objects passed to the :hook:`pytest_generate_tests` hook.
1093
+
1094
+ They help to inspect a test function and to generate tests according to
1095
+ test configuration or values specified in the class or module where a
1096
+ test function is defined.
1097
+ """
1098
+
1099
+ def __init__(
1100
+ self,
1101
+ definition: FunctionDefinition,
1102
+ fixtureinfo: fixtures.FuncFixtureInfo,
1103
+ config: Config,
1104
+ cls=None,
1105
+ module=None,
1106
+ *,
1107
+ _ispytest: bool = False,
1108
+ ) -> None:
1109
+ check_ispytest(_ispytest)
1110
+
1111
+ #: Access to the underlying :class:`_pytest.python.FunctionDefinition`.
1112
+ self.definition = definition
1113
+
1114
+ #: Access to the :class:`pytest.Config` object for the test session.
1115
+ self.config = config
1116
+
1117
+ #: The module object where the test function is defined.
1118
+ self.module = module
1119
+
1120
+ #: Underlying Python test function.
1121
+ self.function = definition.obj
1122
+
1123
+ #: Set of fixture names required by the test function.
1124
+ self.fixturenames = fixtureinfo.names_closure
1125
+
1126
+ #: Class object where the test function is defined, or ``None``.
1127
+ self.cls = cls
1128
+
1129
+ self._arg2fixturedefs = fixtureinfo.name2fixturedefs
1130
+
1131
+ # Result of parametrize().
1132
+ self._calls: list[CallSpec2] = []
1133
+
1134
+ def parametrize(
1135
+ self,
1136
+ argnames: str | Sequence[str],
1137
+ argvalues: Iterable[ParameterSet | Sequence[object] | object],
1138
+ indirect: bool | Sequence[str] = False,
1139
+ ids: Iterable[object | None] | Callable[[Any], object | None] | None = None,
1140
+ scope: _ScopeName | None = None,
1141
+ *,
1142
+ _param_mark: Mark | None = None,
1143
+ ) -> None:
1144
+ """Add new invocations to the underlying test function using the list
1145
+ of argvalues for the given argnames. Parametrization is performed
1146
+ during the collection phase. If you need to set up expensive resources,
1147
+ consider passing ``indirect`` so the setup happens at test setup time
+ rather than at collection time.
1148
+
1149
+ Can be called multiple times per test function (but only on different
1150
+ argument names), in which case each call parametrizes all previous
1151
+ parametrizations, e.g.
1152
+
1153
+ ::
1154
+
1155
+ unparametrized: t
1156
+ parametrize ["x", "y"]: t[x], t[y]
1157
+ parametrize [1, 2]: t[x-1], t[x-2], t[y-1], t[y-2]
1158
+
1159
+ :param argnames:
1160
+ A comma-separated string denoting one or more argument names, or
1161
+ a list/tuple of argument strings.
1162
+
1163
+ :param argvalues:
1164
+ The list of argvalues determines how often a test is invoked with
1165
+ different argument values.
1166
+
1167
+ If only one argname was specified argvalues is a list of values.
1168
+ If N argnames were specified, argvalues must be a list of
1169
+ N-tuples, where each tuple-element specifies a value for its
1170
+ respective argname.
1171
+ :type argvalues: Iterable[_pytest.mark.structures.ParameterSet | Sequence[object] | object]
1172
+ :param indirect:
1173
+ A list of arguments' names (subset of argnames) or a boolean.
1174
+ If True the list contains all names from the argnames. Each
1175
+ argvalue corresponding to an argname in this list will
1176
+ be passed as request.param to its respective argname fixture
1177
+ function so that it can perform more expensive setups during the
1178
+ setup phase of a test rather than at collection time.
1179
+
1180
+ :param ids:
1181
+ Sequence of (or generator for) ids for ``argvalues``,
1182
+ or a callable to return part of the id for each argvalue.
1183
+
1184
+ With sequences (and generators like ``itertools.count()``) the
1185
+ returned ids should be of type ``string``, ``int``, ``float``,
1186
+ ``bool``, or ``None``.
1187
+ They are mapped to the corresponding index in ``argvalues``.
1188
+ ``None`` means to use the auto-generated id.
1189
+
1190
+ If it is a callable it will be called for each entry in
1191
+ ``argvalues``, and the return value is used as part of the
1192
+ auto-generated id for the whole set (where parts are joined with
1193
+ dashes ("-")).
1194
+ This is useful to provide more specific ids for certain items, e.g.
1195
+ dates. Returning ``None`` will use an auto-generated id.
1196
+
1197
+ If no ids are provided they will be generated automatically from
1198
+ the argvalues.
1199
+
1200
+ :param scope:
1201
+ If specified it denotes the scope of the parameters.
1202
+ The scope is used for grouping tests by parameter instances.
1203
+ It will also override any fixture-function defined scope, allowing
1204
+ to set a dynamic scope using test context or configuration.
1205
+ """
1206
+ argnames, parametersets = ParameterSet._for_parametrize(
1207
+ argnames,
1208
+ argvalues,
1209
+ self.function,
1210
+ self.config,
1211
+ nodeid=self.definition.nodeid,
1212
+ )
1213
+ del argvalues
1214
+
1215
+ if "request" in argnames:
1216
+ fail(
1217
+ "'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
1218
+ pytrace=False,
1219
+ )
1220
+
1221
+ if scope is not None:
1222
+ scope_ = Scope.from_user(
1223
+ scope, descr=f"parametrize() call in {self.function.__name__}"
1224
+ )
1225
+ else:
1226
+ scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
1227
+
1228
+ self._validate_if_using_arg_names(argnames, indirect)
1229
+
1230
+ # Use any already (possibly) generated ids with parametrize Marks.
1231
+ if _param_mark and _param_mark._param_ids_from:
1232
+ generated_ids = _param_mark._param_ids_from._param_ids_generated
1233
+ if generated_ids is not None:
1234
+ ids = generated_ids
1235
+
1236
+ ids = self._resolve_parameter_set_ids(
1237
+ argnames, ids, parametersets, nodeid=self.definition.nodeid
1238
+ )
1239
+
1240
+ # Store used (possibly generated) ids with parametrize Marks.
1241
+ if _param_mark and _param_mark._param_ids_from and generated_ids is None:
1242
+ object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)
1243
+
1244
+ # Add funcargs as fixturedefs to fixtureinfo.arg2fixturedefs by registering
1245
+ # artificial "pseudo" FixtureDef's so that later at test execution time we can
1246
+ # rely on a proper FixtureDef to exist for fixture setup.
1247
+ node = None
1248
+ # If we have a scope that is higher than function, we need
1249
+ # to make sure we only ever create an according fixturedef on
1250
+ # a per-scope basis. We thus store and cache the fixturedef on the
1251
+ # node related to the scope.
1252
+ if scope_ is not Scope.Function:
1253
+ collector = self.definition.parent
1254
+ assert collector is not None
1255
+ node = get_scope_node(collector, scope_)
1256
+ if node is None:
1257
+ # If used class scope and there is no class, use module-level
1258
+ # collector (for now).
1259
+ if scope_ is Scope.Class:
1260
+ assert isinstance(collector, Module)
1261
+ node = collector
1262
+ # If used package scope and there is no package, use session
1263
+ # (for now).
1264
+ elif scope_ is Scope.Package:
1265
+ node = collector.session
1266
+ else:
1267
+ assert False, f"Unhandled missing scope: {scope}"
1268
+ if node is None:
1269
+ name2pseudofixturedef = None
1270
+ else:
1271
+ default: dict[str, FixtureDef[Any]] = {}
1272
+ name2pseudofixturedef = node.stash.setdefault(
1273
+ name2pseudofixturedef_key, default
1274
+ )
1275
+ arg_directness = self._resolve_args_directness(argnames, indirect)
1276
+ for argname in argnames:
1277
+ if arg_directness[argname] == "indirect":
1278
+ continue
1279
+ if name2pseudofixturedef is not None and argname in name2pseudofixturedef:
1280
+ fixturedef = name2pseudofixturedef[argname]
1281
+ else:
1282
+ fixturedef = FixtureDef(
1283
+ config=self.config,
1284
+ baseid="",
1285
+ argname=argname,
1286
+ func=get_direct_param_fixture_func,
1287
+ scope=scope_,
1288
+ params=None,
1289
+ ids=None,
1290
+ _ispytest=True,
1291
+ )
1292
+ if name2pseudofixturedef is not None:
1293
+ name2pseudofixturedef[argname] = fixturedef
1294
+ self._arg2fixturedefs[argname] = [fixturedef]
1295
+
1296
+ # Create the new calls: if we are parametrize() multiple times (by applying the decorator
1297
+ # more than once) then we accumulate those calls generating the cartesian product
1298
+ # of all calls.
1299
+ newcalls = []
1300
+ for callspec in self._calls or [CallSpec2()]:
1301
+ for param_index, (param_id, param_set) in enumerate(
1302
+ zip(ids, parametersets)
1303
+ ):
1304
+ newcallspec = callspec.setmulti(
1305
+ argnames=argnames,
1306
+ valset=param_set.values,
1307
+ id=param_id,
1308
+ marks=param_set.marks,
1309
+ scope=scope_,
1310
+ param_index=param_index,
1311
+ )
1312
+ newcalls.append(newcallspec)
1313
+ self._calls = newcalls
1314
+
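+ # Hedged example of the cartesian-product accumulation above: stacking the
+ # decorator multiplies the generated calls:
+ #
+ #   @pytest.mark.parametrize("x", [0, 1])
+ #   @pytest.mark.parametrize("y", ["a", "b"])
+ #   def test_xy(x, y): ...
+ #
+ # collects test_xy[a-0], test_xy[a-1], test_xy[b-0] and test_xy[b-1].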
1315
+ def _resolve_parameter_set_ids(
1316
+ self,
1317
+ argnames: Sequence[str],
1318
+ ids: Iterable[object | None] | Callable[[Any], object | None] | None,
1319
+ parametersets: Sequence[ParameterSet],
1320
+ nodeid: str,
1321
+ ) -> list[str]:
1322
+ """Resolve the actual ids for the given parameter sets.
1323
+
1324
+ :param argnames:
1325
+ Argument names passed to ``parametrize()``.
1326
+ :param ids:
1327
+ The `ids` parameter of the ``parametrize()`` call (see docs).
1328
+ :param parametersets:
1329
+ The parameter sets, each containing a set of values corresponding
1330
+ to ``argnames``.
1331
+ :param str nodeid:
1332
+ The nodeid of the definition item that generated this
1333
+ parametrization.
1334
+ :returns:
1335
+ List with ids for each parameter set given.
1336
+ """
1337
+ if ids is None:
1338
+ idfn = None
1339
+ ids_ = None
1340
+ elif callable(ids):
1341
+ idfn = ids
1342
+ ids_ = None
1343
+ else:
1344
+ idfn = None
1345
+ ids_ = self._validate_ids(ids, parametersets, self.function.__name__)
1346
+ id_maker = IdMaker(
1347
+ argnames,
1348
+ parametersets,
1349
+ idfn,
1350
+ ids_,
1351
+ self.config,
1352
+ nodeid=nodeid,
1353
+ func_name=self.function.__name__,
1354
+ )
1355
+ return id_maker.make_unique_parameterset_ids()
1356
+
1357
+ def _validate_ids(
1358
+ self,
1359
+ ids: Iterable[object | None],
1360
+ parametersets: Sequence[ParameterSet],
1361
+ func_name: str,
1362
+ ) -> list[object | None]:
1363
+ try:
1364
+ num_ids = len(ids) # type: ignore[arg-type]
1365
+ except TypeError:
1366
+ try:
1367
+ iter(ids)
1368
+ except TypeError as e:
1369
+ raise TypeError("ids must be a callable or an iterable") from e
1370
+ num_ids = len(parametersets)
1371
+
1372
+ # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
1373
+ if num_ids != len(parametersets) and num_ids != 0:
1374
+ msg = "In {}: {} parameter sets specified, with different number of ids: {}"
1375
+ fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False)
1376
+
1377
+ return list(itertools.islice(ids, num_ids))
1378
+
1379
+ def _resolve_args_directness(
1380
+ self,
1381
+ argnames: Sequence[str],
1382
+ indirect: bool | Sequence[str],
1383
+ ) -> dict[str, Literal["indirect", "direct"]]:
1384
+ """Resolve if each parametrized argument must be considered an indirect
1385
+ parameter to a fixture of the same name, or a direct parameter to the
1386
+ parametrized function, based on the ``indirect`` parameter of the
1387
+ parametrized() call.
1388
+
1389
+ :param argnames:
1390
+ List of argument names passed to ``parametrize()``.
1391
+ :param indirect:
1392
+ Same as the ``indirect`` parameter of ``parametrize()``.
1393
+ :returns:
1394
+ A dict mapping each arg name to either "indirect" or "direct".
1395
+ """
1396
+ arg_directness: dict[str, Literal["indirect", "direct"]]
1397
+ if isinstance(indirect, bool):
1398
+ arg_directness = dict.fromkeys(
1399
+ argnames, "indirect" if indirect else "direct"
1400
+ )
1401
+ elif isinstance(indirect, Sequence):
1402
+ arg_directness = dict.fromkeys(argnames, "direct")
1403
+ for arg in indirect:
1404
+ if arg not in argnames:
1405
+ fail(
1406
+ f"In {self.function.__name__}: indirect fixture '{arg}' doesn't exist",
1407
+ pytrace=False,
1408
+ )
1409
+ arg_directness[arg] = "indirect"
1410
+ else:
1411
+ fail(
1412
+ f"In {self.function.__name__}: expected Sequence or boolean"
1413
+ f" for indirect, got {type(indirect).__name__}",
1414
+ pytrace=False,
1415
+ )
1416
+ return arg_directness
1417
+
1418
+ def _validate_if_using_arg_names(
1419
+ self,
1420
+ argnames: Sequence[str],
1421
+ indirect: bool | Sequence[str],
1422
+ ) -> None:
1423
+ """Check if all argnames are being used, by default values, or directly/indirectly.
1424
+
1425
+ :param List[str] argnames: List of argument names passed to ``parametrize()``.
1426
+ :param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
1427
+ :raises ValueError: If validation fails.
1428
+ """
1429
+ default_arg_names = set(get_default_arg_names(self.function))
1430
+ func_name = self.function.__name__
1431
+ for arg in argnames:
1432
+ if arg not in self.fixturenames:
1433
+ if arg in default_arg_names:
1434
+ fail(
1435
+ f"In {func_name}: function already takes an argument '{arg}' with a default value",
1436
+ pytrace=False,
1437
+ )
1438
+ else:
1439
+ if isinstance(indirect, Sequence):
1440
+ name = "fixture" if arg in indirect else "argument"
1441
+ else:
1442
+ name = "fixture" if indirect else "argument"
1443
+ fail(
1444
+ f"In {func_name}: function uses no {name} '{arg}'",
1445
+ pytrace=False,
1446
+ )
1447
+
1448
+
1449
+ def _find_parametrized_scope(
1450
+ argnames: Sequence[str],
1451
+ arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]],
1452
+ indirect: bool | Sequence[str],
1453
+ ) -> Scope:
1454
+ """Find the most appropriate scope for a parametrized call based on its arguments.
1455
+
1456
+ When there's at least one direct argument, always use "function" scope.
1457
+
1458
+ When a test function is parametrized and all its arguments are indirect
1459
+ (e.g. fixtures), return the most narrow scope based on the fixtures used.
1460
+
1461
+ Related to issue #1832, based on code posted by @Kingdread.
1462
+ """
1463
+ if isinstance(indirect, Sequence):
1464
+ all_arguments_are_fixtures = len(indirect) == len(argnames)
1465
+ else:
1466
+ all_arguments_are_fixtures = bool(indirect)
1467
+
1468
+ if all_arguments_are_fixtures:
1469
+ fixturedefs = arg2fixturedefs or {}
1470
+ used_scopes = [
1471
+ fixturedef[-1]._scope
1472
+ for name, fixturedef in fixturedefs.items()
1473
+ if name in argnames
1474
+ ]
1475
+ # Takes the most narrow scope from used fixtures.
1476
+ return min(used_scopes, default=Scope.Function)
1477
+
1478
+ return Scope.Function
1479
+
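+ # Hedged example of indirect parametrization feeding the scope logic above:
+ # with all argnames indirect, the scope comes from the fixture itself
+ # (`make_backend` is a placeholder):
+ #
+ #   @pytest.fixture(scope="module")
+ #   def backend(request):
+ #       return make_backend(request.param)
+ #
+ #   @pytest.mark.parametrize("backend", ["sqlite", "postgres"], indirect=True)
+ #   def test_backend(backend): ...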
1480
+
1481
+ def _ascii_escaped_by_config(val: str | bytes, config: Config | None) -> str:
1482
+ if config is None:
1483
+ escape_option = False
1484
+ else:
1485
+ escape_option = config.getini(
1486
+ "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
1487
+ )
1488
+ # TODO: If escaping is turned off and the user passes bytes,
1489
+ # will return a bytes. For now we ignore this but the
1490
+ # code *probably* doesn't handle this case.
1491
+ return val if escape_option else ascii_escaped(val) # type: ignore
1492
+
1493
+
1494
+ class Function(PyobjMixin, nodes.Item):
1495
+ """Item responsible for setting up and executing a Python test function.
1496
+
1497
+ :param name:
1498
+ The full function name, including any decorations like those
1499
+ added by parametrization (``my_func[my_param]``).
1500
+ :param parent:
1501
+ The parent Node.
1502
+ :param config:
1503
+ The pytest Config object.
1504
+ :param callspec:
1505
+ If given, this function has been parametrized and the callspec contains
1506
+ meta information about the parametrization.
1507
+ :param callobj:
1508
+ If given, the object which will be called when the Function is invoked,
1509
+ otherwise the callobj will be obtained from ``parent`` using ``originalname``.
1510
+ :param keywords:
1511
+ Keywords bound to the function object for "-k" matching.
1512
+ :param session:
1513
+ The pytest Session object.
1514
+ :param fixtureinfo:
1515
+ Fixture information already resolved at this fixture node.
1516
+ :param originalname:
1517
+ The attribute name to use for accessing the underlying function object.
1518
+ Defaults to ``name``. Set this if name is different from the original name,
1519
+ for example when it contains decorations like those added by parametrization
1520
+ (``my_func[my_param]``).
1521
+ """
1522
+
1523
+ # Disable since functions handle it themselves.
1524
+ _ALLOW_MARKERS = False
1525
+
1526
+ def __init__(
1527
+ self,
1528
+ name: str,
1529
+ parent,
1530
+ config: Config | None = None,
1531
+ callspec: CallSpec2 | None = None,
1532
+ callobj=NOTSET,
1533
+ keywords: Mapping[str, Any] | None = None,
1534
+ session: Session | None = None,
1535
+ fixtureinfo: FuncFixtureInfo | None = None,
1536
+ originalname: str | None = None,
1537
+ ) -> None:
1538
+ super().__init__(name, parent, config=config, session=session)
1539
+
1540
+ if callobj is not NOTSET:
1541
+ self._obj = callobj
1542
+ self._instance = getattr(callobj, "__self__", None)
1543
+
1544
+ #: Original function name, without any decorations (for example
1545
+ #: parametrization adds a ``"[...]"`` suffix to function names), used to access
1546
+ #: the underlying function object from ``parent`` (in case ``callobj`` is not given
1547
+ #: explicitly).
1548
+ #:
1549
+ #: .. versionadded:: 3.0
1550
+ self.originalname = originalname or name
1551
+
1552
+ # Note: when FunctionDefinition is introduced, we should change ``originalname``
1553
+ # to a readonly property that returns FunctionDefinition.name.
1554
+
1555
+ self.own_markers.extend(get_unpacked_marks(self.obj))
1556
+ if callspec:
1557
+ self.callspec = callspec
1558
+ self.own_markers.extend(callspec.marks)
1559
+
1560
+ # todo: this is a hell of a hack
1561
+ # https://github.com/pytest-dev/pytest/issues/4569
1562
+ # Note: the order of the updates is important here; it indicates what
1563
+ # takes priority (ctor argument over function attributes over markers).
1564
+ # Take own_markers only; NodeKeywords handles parent traversal on its own.
1565
+ self.keywords.update((mark.name, mark) for mark in self.own_markers)
1566
+ self.keywords.update(self.obj.__dict__)
1567
+ if keywords:
1568
+ self.keywords.update(keywords)
1569
+
1570
+ if fixtureinfo is None:
1571
+ fm = self.session._fixturemanager
1572
+ fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls)
1573
+ self._fixtureinfo: FuncFixtureInfo = fixtureinfo
1574
+ self.fixturenames = fixtureinfo.names_closure
1575
+ self._initrequest()
1576
+
1577
+ # todo: determine sound type limitations
1578
+ @classmethod
1579
+ def from_parent(cls, parent, **kw) -> Self:
1580
+ """The public constructor."""
1581
+ return super().from_parent(parent=parent, **kw)
1582
+
1583
+ def _initrequest(self) -> None:
1584
+ self.funcargs: dict[str, object] = {}
1585
+ self._request = fixtures.TopRequest(self, _ispytest=True)
1586
+
1587
+ @property
1588
+ def function(self):
1589
+ """Underlying python 'function' object."""
1590
+ return getimfunc(self.obj)
1591
+
1592
+ @property
1593
+ def instance(self):
1594
+ try:
1595
+ return self._instance
1596
+ except AttributeError:
1597
+ if isinstance(self.parent, Class):
1598
+ # Each Function gets a fresh class instance.
1599
+ self._instance = self._getinstance()
1600
+ else:
1601
+ self._instance = None
1602
+ return self._instance
1603
+
1604
+ def _getinstance(self):
1605
+ if isinstance(self.parent, Class):
1606
+ # Each Function gets a fresh class instance.
1607
+ return self.parent.newinstance()
1608
+ else:
1609
+ return None
1610
+
1611
+ def _getobj(self):
1612
+ instance = self.instance
1613
+ if instance is not None:
1614
+ parent_obj = instance
1615
+ else:
1616
+ assert self.parent is not None
1617
+ parent_obj = self.parent.obj # type: ignore[attr-defined]
1618
+ return getattr(parent_obj, self.originalname)
1619
+
1620
+ @property
1621
+ def _pyfuncitem(self):
1622
+ """(compatonly) for code expecting pytest-2.2 style request objects."""
1623
+ return self
1624
+
1625
+ def runtest(self) -> None:
1626
+ """Execute the underlying test function."""
1627
+ self.ihook.pytest_pyfunc_call(pyfuncitem=self)
1628
+
1629
+ def setup(self) -> None:
1630
+ self._request._fillfixtures()
1631
+
1632
+ def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
1633
+ if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
1634
+ code = _pytest._code.Code.from_function(get_real_func(self.obj))
1635
+ path, firstlineno = code.path, code.firstlineno
1636
+ traceback = excinfo.traceback
1637
+ ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
1638
+ if ntraceback == traceback:
1639
+ ntraceback = ntraceback.cut(path=path)
1640
+ if ntraceback == traceback:
1641
+ ntraceback = ntraceback.filter(filter_traceback)
1642
+ if not ntraceback:
1643
+ ntraceback = traceback
1644
+ ntraceback = ntraceback.filter(excinfo)
1645
+
1646
+ # issue364: mark all but first and last frames to
1647
+ # only show a single-line message for each frame.
1648
+ if self.config.getoption("tbstyle", "auto") == "auto":
1649
+ if len(ntraceback) > 2:
1650
+ ntraceback = Traceback(
1651
+ (
1652
+ ntraceback[0],
1653
+ *(t.with_repr_style("short") for t in ntraceback[1:-1]),
1654
+ ntraceback[-1],
1655
+ )
1656
+ )
1657
+
1658
+ return ntraceback
1659
+ return excinfo.traceback
1660
+
1661
+ # TODO: Type ignored -- breaks Liskov Substitution.
1662
+ def repr_failure( # type: ignore[override]
1663
+ self,
1664
+ excinfo: ExceptionInfo[BaseException],
1665
+ ) -> str | TerminalRepr:
1666
+ style = self.config.getoption("tbstyle", "auto")
1667
+ if style == "auto":
1668
+ style = "long"
1669
+ return self._repr_failure_py(excinfo, style=style)
1670
+
1671
+
1672
+ class FunctionDefinition(Function):
1673
+ """This class is a stop gap solution until we evolve to have actual function
1674
+ definition nodes and manage to get rid of ``metafunc``."""
1675
+
1676
+ def runtest(self) -> None:
1677
+ raise RuntimeError("function definitions are not supposed to be run as tests")
1678
+
1679
+ setup = runtest
.venv/lib/python3.11/site-packages/_pytest/python_api.py ADDED
@@ -0,0 +1,1028 @@
1
+ # mypy: allow-untyped-defs
2
+ from __future__ import annotations
3
+
4
+ from collections.abc import Collection
5
+ from collections.abc import Sized
6
+ from decimal import Decimal
7
+ import math
8
+ from numbers import Complex
9
+ import pprint
10
+ import re
11
+ from types import TracebackType
12
+ from typing import Any
13
+ from typing import Callable
14
+ from typing import cast
15
+ from typing import ContextManager
16
+ from typing import final
17
+ from typing import Mapping
18
+ from typing import overload
19
+ from typing import Pattern
20
+ from typing import Sequence
21
+ from typing import Tuple
22
+ from typing import Type
23
+ from typing import TYPE_CHECKING
24
+ from typing import TypeVar
25
+
26
+ import _pytest._code
27
+ from _pytest.outcomes import fail
28
+
29
+
30
+ if TYPE_CHECKING:
31
+ from numpy import ndarray
32
+
33
+
34
+ def _compare_approx(
35
+ full_object: object,
36
+ message_data: Sequence[tuple[str, str, str]],
37
+ number_of_elements: int,
38
+ different_ids: Sequence[object],
39
+ max_abs_diff: float,
40
+ max_rel_diff: float,
41
+ ) -> list[str]:
42
+ message_list = list(message_data)
43
+ message_list.insert(0, ("Index", "Obtained", "Expected"))
44
+ max_sizes = [0, 0, 0]
45
+ for index, obtained, expected in message_list:
46
+ max_sizes[0] = max(max_sizes[0], len(index))
47
+ max_sizes[1] = max(max_sizes[1], len(obtained))
48
+ max_sizes[2] = max(max_sizes[2], len(expected))
49
+ explanation = [
50
+ f"comparison failed. Mismatched elements: {len(different_ids)} / {number_of_elements}:",
51
+ f"Max absolute difference: {max_abs_diff}",
52
+ f"Max relative difference: {max_rel_diff}",
53
+ ] + [
54
+ f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}"
55
+ for indexes, obtained, expected in message_list
56
+ ]
57
+ return explanation
58
+
59
+
60
+ # builtin pytest.approx helper
61
+
62
+
63
+ class ApproxBase:
64
+ """Provide shared utilities for making approximate comparisons between
65
+ numbers or sequences of numbers."""
66
+
67
+ # Tell numpy to use our `__eq__` operator instead of its own.
68
+ __array_ufunc__ = None
69
+ __array_priority__ = 100
70
+
71
+ def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None:
72
+ __tracebackhide__ = True
73
+ self.expected = expected
74
+ self.abs = abs
75
+ self.rel = rel
76
+ self.nan_ok = nan_ok
77
+ self._check_type()
78
+
79
+ def __repr__(self) -> str:
80
+ raise NotImplementedError
81
+
82
+ def _repr_compare(self, other_side: Any) -> list[str]:
83
+ return [
84
+ "comparison failed",
85
+ f"Obtained: {other_side}",
86
+ f"Expected: {self}",
87
+ ]
88
+
89
+ def __eq__(self, actual) -> bool:
90
+ return all(
91
+ a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual)
92
+ )
93
+
94
+ def __bool__(self):
95
+ __tracebackhide__ = True
96
+ raise AssertionError(
97
+ "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?"
98
+ )
99
+
100
+ # Ignore type because of https://github.com/python/mypy/issues/4266.
101
+ __hash__ = None # type: ignore
102
+
103
+ def __ne__(self, actual) -> bool:
104
+ return not (actual == self)
105
+
106
+ def _approx_scalar(self, x) -> ApproxScalar:
107
+ if isinstance(x, Decimal):
108
+ return ApproxDecimal(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
109
+ return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
110
+
111
+ def _yield_comparisons(self, actual):
112
+ """Yield all the pairs of numbers to be compared.
113
+
114
+ This is used to implement the `__eq__` method.
115
+ """
116
+ raise NotImplementedError
117
+
118
+ def _check_type(self) -> None:
119
+ """Raise a TypeError if the expected value is not a valid type."""
120
+ # This is only a concern if the expected value is a sequence. In every
121
+ # other case, the approx() function ensures that the expected value has
122
+ # a numeric type. For this reason, the default is to do nothing. The
123
+ # classes that deal with sequences should reimplement this method to
124
+ # raise if there are any non-numeric elements in the sequence.
125
+
126
+
127
+ def _recursive_sequence_map(f, x):
128
+ """Recursively map a function over a sequence of arbitrary depth"""
129
+ if isinstance(x, (list, tuple)):
130
+ seq_type = type(x)
131
+ return seq_type(_recursive_sequence_map(f, xi) for xi in x)
132
+ elif _is_sequence_like(x):
133
+ return [_recursive_sequence_map(f, xi) for xi in x]
134
+ else:
135
+ return f(x)
136
+
137
+
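For illustration, the helper above preserves ``list``/``tuple`` types while mapping over arbitrary nesting (other sequence-like inputs come back as plain lists)::

    >>> _recursive_sequence_map(str, [1, (2, 3)])
    ['1', ('2', '3')]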
138
+ class ApproxNumpy(ApproxBase):
139
+ """Perform approximate comparisons where the expected value is a numpy array."""
140
+
141
+ def __repr__(self) -> str:
142
+ list_scalars = _recursive_sequence_map(
143
+ self._approx_scalar, self.expected.tolist()
144
+ )
145
+ return f"approx({list_scalars!r})"
146
+
147
+ def _repr_compare(self, other_side: ndarray | list[Any]) -> list[str]:
148
+ import itertools
149
+ import math
150
+
151
+ def get_value_from_nested_list(
152
+ nested_list: list[Any], nd_index: tuple[Any, ...]
153
+ ) -> Any:
154
+ """
155
+ Helper function to get the value out of a nested list, given an n-dimensional index.
156
+ This mimics numpy's indexing, but for raw nested python lists.
157
+ """
158
+ value: Any = nested_list
159
+ for i in nd_index:
160
+ value = value[i]
161
+ return value
162
+
163
+ np_array_shape = self.expected.shape
164
+ approx_side_as_seq = _recursive_sequence_map(
165
+ self._approx_scalar, self.expected.tolist()
166
+ )
167
+
168
+ # convert other_side to numpy array to ensure shape attribute is available
169
+ other_side_as_array = _as_numpy_array(other_side)
170
+ assert other_side_as_array is not None
171
+
172
+ if np_array_shape != other_side_as_array.shape:
173
+ return [
174
+ "Impossible to compare arrays with different shapes.",
175
+ f"Shapes: {np_array_shape} and {other_side_as_array.shape}",
176
+ ]
177
+
178
+ number_of_elements = self.expected.size
179
+ max_abs_diff = -math.inf
180
+ max_rel_diff = -math.inf
181
+ different_ids = []
182
+ for index in itertools.product(*(range(i) for i in np_array_shape)):
183
+ approx_value = get_value_from_nested_list(approx_side_as_seq, index)
184
+ other_value = get_value_from_nested_list(other_side_as_array, index)
185
+ if approx_value != other_value:
186
+ abs_diff = abs(approx_value.expected - other_value)
187
+ max_abs_diff = max(max_abs_diff, abs_diff)
188
+ if other_value == 0.0:
189
+ max_rel_diff = math.inf
190
+ else:
191
+ max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
192
+ different_ids.append(index)
193
+
194
+ message_data = [
195
+ (
196
+ str(index),
197
+ str(get_value_from_nested_list(other_side_as_array, index)),
198
+ str(get_value_from_nested_list(approx_side_as_seq, index)),
199
+ )
200
+ for index in different_ids
201
+ ]
202
+ return _compare_approx(
203
+ self.expected,
204
+ message_data,
205
+ number_of_elements,
206
+ different_ids,
207
+ max_abs_diff,
208
+ max_rel_diff,
209
+ )
210
+
211
+ def __eq__(self, actual) -> bool:
212
+ import numpy as np
213
+
214
+ # self.expected is supposed to always be an array here.
215
+
216
+ if not np.isscalar(actual):
217
+ try:
218
+ actual = np.asarray(actual)
219
+ except Exception as e:
220
+ raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e
221
+
222
+ if not np.isscalar(actual) and actual.shape != self.expected.shape:
223
+ return False
224
+
225
+ return super().__eq__(actual)
226
+
227
+ def _yield_comparisons(self, actual):
228
+ import numpy as np
229
+
230
+ # `actual` can either be a numpy array or a scalar, it is treated in
231
+ # `__eq__` before being passed to `ApproxBase.__eq__`, which is the
232
+ # only method that calls this one.
233
+
234
+ if np.isscalar(actual):
235
+ for i in np.ndindex(self.expected.shape):
236
+ yield actual, self.expected[i].item()
237
+ else:
238
+ for i in np.ndindex(self.expected.shape):
239
+ yield actual[i].item(), self.expected[i].item()
240
+
241
+
242
+ class ApproxMapping(ApproxBase):
243
+ """Perform approximate comparisons where the expected value is a mapping
244
+ with numeric values (the keys can be anything)."""
245
+
246
+ def __repr__(self) -> str:
247
+ return f"approx({({k: self._approx_scalar(v) for k, v in self.expected.items()})!r})"
248
+
249
+ def _repr_compare(self, other_side: Mapping[object, float]) -> list[str]:
250
+ import math
251
+
252
+ approx_side_as_map = {
253
+ k: self._approx_scalar(v) for k, v in self.expected.items()
254
+ }
255
+
256
+ number_of_elements = len(approx_side_as_map)
257
+ max_abs_diff = -math.inf
258
+ max_rel_diff = -math.inf
259
+ different_ids = []
260
+ for (approx_key, approx_value), other_value in zip(
261
+ approx_side_as_map.items(), other_side.values()
262
+ ):
263
+ if approx_value != other_value:
264
+ if approx_value.expected is not None and other_value is not None:
265
+ try:
266
+ max_abs_diff = max(
267
+ max_abs_diff, abs(approx_value.expected - other_value)
268
+ )
269
+ if approx_value.expected == 0.0:
270
+ max_rel_diff = math.inf
271
+ else:
272
+ max_rel_diff = max(
273
+ max_rel_diff,
274
+ abs(
275
+ (approx_value.expected - other_value)
276
+ / approx_value.expected
277
+ ),
278
+ )
279
+ except ZeroDivisionError:
280
+ pass
281
+ different_ids.append(approx_key)
282
+
283
+ message_data = [
284
+ (str(key), str(other_side[key]), str(approx_side_as_map[key]))
285
+ for key in different_ids
286
+ ]
287
+
288
+ return _compare_approx(
289
+ self.expected,
290
+ message_data,
291
+ number_of_elements,
292
+ different_ids,
293
+ max_abs_diff,
294
+ max_rel_diff,
295
+ )
296
+
297
+ def __eq__(self, actual) -> bool:
298
+ try:
299
+ if set(actual.keys()) != set(self.expected.keys()):
300
+ return False
301
+ except AttributeError:
302
+ return False
303
+
304
+ return super().__eq__(actual)
305
+
306
+ def _yield_comparisons(self, actual):
307
+ for k in self.expected.keys():
308
+ yield actual[k], self.expected[k]
309
+
310
+ def _check_type(self) -> None:
311
+ __tracebackhide__ = True
312
+ for key, value in self.expected.items():
313
+ if isinstance(value, type(self.expected)):
314
+ msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}"
315
+ raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
316
+
317
+
318
+ class ApproxSequenceLike(ApproxBase):
319
+ """Perform approximate comparisons where the expected value is a sequence of numbers."""
320
+
321
+ def __repr__(self) -> str:
322
+ seq_type = type(self.expected)
323
+ if seq_type not in (tuple, list):
324
+ seq_type = list
325
+ return f"approx({seq_type(self._approx_scalar(x) for x in self.expected)!r})"
326
+
327
+ def _repr_compare(self, other_side: Sequence[float]) -> list[str]:
328
+ import math
329
+
330
+ if len(self.expected) != len(other_side):
331
+ return [
332
+ "Impossible to compare lists with different sizes.",
333
+ f"Lengths: {len(self.expected)} and {len(other_side)}",
334
+ ]
335
+
336
+ approx_side_as_map = _recursive_sequence_map(self._approx_scalar, self.expected)
337
+
338
+ number_of_elements = len(approx_side_as_map)
339
+ max_abs_diff = -math.inf
340
+ max_rel_diff = -math.inf
341
+ different_ids = []
342
+ for i, (approx_value, other_value) in enumerate(
343
+ zip(approx_side_as_map, other_side)
344
+ ):
345
+ if approx_value != other_value:
346
+ abs_diff = abs(approx_value.expected - other_value)
347
+ max_abs_diff = max(max_abs_diff, abs_diff)
348
+ if other_value == 0.0:
349
+ max_rel_diff = math.inf
350
+ else:
351
+ max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
352
+ different_ids.append(i)
353
+
354
+ message_data = [
355
+ (str(i), str(other_side[i]), str(approx_side_as_map[i]))
356
+ for i in different_ids
357
+ ]
358
+
359
+ return _compare_approx(
360
+ self.expected,
361
+ message_data,
362
+ number_of_elements,
363
+ different_ids,
364
+ max_abs_diff,
365
+ max_rel_diff,
366
+ )
367
+
368
+ def __eq__(self, actual) -> bool:
369
+ try:
370
+ if len(actual) != len(self.expected):
371
+ return False
372
+ except TypeError:
373
+ return False
374
+ return super().__eq__(actual)
375
+
376
+ def _yield_comparisons(self, actual):
377
+ return zip(actual, self.expected)
378
+
379
+ def _check_type(self) -> None:
380
+ __tracebackhide__ = True
381
+ for index, x in enumerate(self.expected):
382
+ if isinstance(x, type(self.expected)):
383
+ msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}"
384
+ raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))
385
+
386
+
387
+ class ApproxScalar(ApproxBase):
388
+ """Perform approximate comparisons where the expected value is a single number."""
389
+
390
+ # Using Real should be better than this Union, but not possible yet:
391
+ # https://github.com/python/typeshed/pull/3108
392
+ DEFAULT_ABSOLUTE_TOLERANCE: float | Decimal = 1e-12
393
+ DEFAULT_RELATIVE_TOLERANCE: float | Decimal = 1e-6
394
+
395
+ def __repr__(self) -> str:
396
+ """Return a string communicating both the expected value and the
397
+ tolerance for the comparison being made.
398
+
399
+ For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``.
400
+ """
401
+ # Don't show a tolerance for values that aren't compared using
402
+ # tolerances, i.e. non-numerics and infinities. Need to call abs to
403
+ # handle complex numbers, e.g. (inf + 1j).
404
+ if (
405
+ isinstance(self.expected, bool)
406
+ or (not isinstance(self.expected, (Complex, Decimal)))
407
+ or math.isinf(abs(self.expected))
408
+ ):
409
+ return str(self.expected)
410
+
411
+ # If a sensible tolerance can't be calculated, self.tolerance will
412
+ # raise a ValueError. In this case, display '???'.
413
+ try:
414
+ vetted_tolerance = f"{self.tolerance:.1e}"
415
+ if (
416
+ isinstance(self.expected, Complex)
417
+ and self.expected.imag
418
+ and not math.isinf(self.tolerance)
419
+ ):
420
+ vetted_tolerance += " ∠ ±180°"
421
+ except ValueError:
422
+ vetted_tolerance = "???"
423
+
424
+ return f"{self.expected} ± {vetted_tolerance}"
425
+
426
+ def __eq__(self, actual) -> bool:
427
+ """Return whether the given value is equal to the expected value
428
+ within the pre-specified tolerance."""
429
+ asarray = _as_numpy_array(actual)
430
+ if asarray is not None:
431
+ # Call ``__eq__()`` manually to prevent infinite-recursion with
432
+ # numpy<1.13. See #3748.
433
+ return all(self.__eq__(a) for a in asarray.flat)
434
+
435
+ # Short-circuit exact equality, except for bool
436
+ if isinstance(self.expected, bool) and not isinstance(actual, bool):
437
+ return False
438
+ elif actual == self.expected:
439
+ return True
440
+
441
+ # If either type is non-numeric, fall back to strict equality.
442
+ # NB: we need Complex, rather than just Number, to ensure that __abs__,
443
+ # __sub__, and __float__ are defined. Also, consider bool to be
444
+ # nonnumeric, even though it has the required arithmetic.
445
+ if isinstance(self.expected, bool) or not (
446
+ isinstance(self.expected, (Complex, Decimal))
447
+ and isinstance(actual, (Complex, Decimal))
448
+ ):
449
+ return False
450
+
451
+ # Allow the user to control whether NaNs are considered equal to each
452
+ # other or not. The abs() calls are for compatibility with complex
453
+ # numbers.
454
+ if math.isnan(abs(self.expected)):
455
+ return self.nan_ok and math.isnan(abs(actual))
456
+
457
+ # Infinity shouldn't be approximately equal to anything but itself, but
458
+ # if there's a relative tolerance, it will be infinite and infinity
459
+ # will seem approximately equal to everything. The equal-to-itself
460
+ # case would have been short circuited above, so here we can just
461
+ # return false if the expected value is infinite. The abs() call is
462
+ # for compatibility with complex numbers.
463
+ if math.isinf(abs(self.expected)):
464
+ return False
465
+
466
+ # Return true if the two numbers are within the tolerance.
467
+ result: bool = abs(self.expected - actual) <= self.tolerance
468
+ return result
469
+
470
+ # Ignore type because of https://github.com/python/mypy/issues/4266.
471
+ __hash__ = None # type: ignore
472
+
473
+ @property
474
+ def tolerance(self):
475
+ """Return the tolerance for the comparison.
476
+
477
+ This could be either an absolute tolerance or a relative tolerance,
478
+ depending on what the user specified or which would be larger.
479
+ """
480
+
481
+ def set_default(x, default):
482
+ return x if x is not None else default
483
+
484
+ # Figure out what the absolute tolerance should be. ``self.abs`` is
485
+ # either None or a value specified by the user.
486
+ absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
487
+
488
+ if absolute_tolerance < 0:
489
+ raise ValueError(
490
+ f"absolute tolerance can't be negative: {absolute_tolerance}"
491
+ )
492
+ if math.isnan(absolute_tolerance):
493
+ raise ValueError("absolute tolerance can't be NaN.")
494
+
495
+ # If the user specified an absolute tolerance but not a relative one,
496
+ # just return the absolute tolerance.
497
+ if self.rel is None:
498
+ if self.abs is not None:
499
+ return absolute_tolerance
500
+
501
+ # Figure out what the relative tolerance should be. ``self.rel`` is
502
+ # either None or a value specified by the user. This is done after
503
+ # we've made sure the user didn't ask for an absolute tolerance only,
504
+ # because we don't want to raise errors about the relative tolerance if
505
+ # we aren't even going to use it.
506
+ relative_tolerance = set_default(
507
+ self.rel, self.DEFAULT_RELATIVE_TOLERANCE
508
+ ) * abs(self.expected)
509
+
510
+ if relative_tolerance < 0:
511
+ raise ValueError(
512
+ f"relative tolerance can't be negative: {relative_tolerance}"
513
+ )
514
+ if math.isnan(relative_tolerance):
515
+ raise ValueError("relative tolerance can't be NaN.")
516
+
517
+ # Return the larger of the relative and absolute tolerances.
518
+ return max(relative_tolerance, absolute_tolerance)
519
+
520
+
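A worked example for the property above, assuming the class as defined: with the defaults ``rel=1e-6`` and ``abs=1e-12`` and an expected value of ``1``, the relative candidate ``1e-6 * abs(1)`` wins over the absolute ``1e-12``::

    >>> ApproxScalar(1).tolerance
    1e-06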
521
+ class ApproxDecimal(ApproxScalar):
522
+ """Perform approximate comparisons where the expected value is a Decimal."""
523
+
524
+ DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
525
+ DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
526
+
527
+
528
+ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
529
+ """Assert that two numbers (or two ordered sequences of numbers) are equal to each other
530
+ within some tolerance.
531
+
532
+ Due to the :doc:`python:tutorial/floatingpoint`, numbers that we
533
+ would intuitively expect to be equal are not always so::
534
+
535
+ >>> 0.1 + 0.2 == 0.3
536
+ False
537
+
538
+ This problem is commonly encountered when writing tests, e.g. when making
539
+ sure that floating-point values are what you expect them to be. One way to
540
+ deal with this problem is to assert that two floating-point numbers are
541
+ equal to within some appropriate tolerance::
542
+
543
+ >>> abs((0.1 + 0.2) - 0.3) < 1e-6
544
+ True
545
+
546
+ However, comparisons like this are tedious to write and difficult to
547
+ understand. Furthermore, absolute comparisons like the one above are
548
+ usually discouraged because there's no tolerance that works well for all
549
+ situations. ``1e-6`` is good for numbers around ``1``, but too small for
550
+ very big numbers and too big for very small ones. It's better to express
551
+ the tolerance as a fraction of the expected value, but relative comparisons
552
+ like that are even more difficult to write correctly and concisely.
553
+
554
+ The ``approx`` class performs floating-point comparisons using a syntax
555
+ that's as intuitive as possible::
556
+
557
+ >>> from pytest import approx
558
+ >>> 0.1 + 0.2 == approx(0.3)
559
+ True
560
+
561
+ The same syntax also works for ordered sequences of numbers::
562
+
563
+ >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
564
+ True
565
+
566
+ ``numpy`` arrays::
567
+
568
+ >>> import numpy as np # doctest: +SKIP
569
+ >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
570
+ True
571
+
572
+ And for a ``numpy`` array against a scalar::
573
+
574
+ >>> import numpy as np # doctest: +SKIP
575
+ >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP
576
+ True
577
+
578
+ Only ordered sequences are supported, because ``approx`` needs
579
+ to infer the relative position of the sequences without ambiguity. This means
580
+ ``sets`` and other unordered sequences are not supported.
581
+
582
+ Finally, dictionary *values* can also be compared::
583
+
584
+ >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
585
+ True
586
+
587
+ The comparison will be true if both mappings have the same keys and their
588
+ respective values match the expected tolerances.
589
+
590
+ **Tolerances**
591
+
592
+ By default, ``approx`` considers numbers within a relative tolerance of
593
+ ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
594
+ This treatment would lead to surprising results if the expected value was
595
+ ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
596
+ To handle this case less surprisingly, ``approx`` also considers numbers
597
+ within an absolute tolerance of ``1e-12`` of its expected value to be
598
+ equal. Infinity and NaN are special cases. Infinity is only considered
599
+ equal to itself, regardless of the relative tolerance. NaN is not
600
+ considered equal to anything by default, but you can make it be equal to
601
+ itself by setting the ``nan_ok`` argument to True. (This is meant to
602
+ facilitate comparing arrays that use NaN to mean "no data".)
603
+
604
+ Both the relative and absolute tolerances can be changed by passing
605
+ arguments to the ``approx`` constructor::
606
+
607
+ >>> 1.0001 == approx(1)
608
+ False
609
+ >>> 1.0001 == approx(1, rel=1e-3)
610
+ True
611
+ >>> 1.0001 == approx(1, abs=1e-3)
612
+ True
613
+
614
+ If you specify ``abs`` but not ``rel``, the comparison will not consider
615
+ the relative tolerance at all. In other words, two numbers that are within
616
+ the default relative tolerance of ``1e-6`` will still be considered unequal
617
+ if they exceed the specified absolute tolerance. If you specify both
618
+ ``abs`` and ``rel``, the numbers will be considered equal if either
619
+ tolerance is met::
620
+
621
+ >>> 1 + 1e-8 == approx(1)
622
+ True
623
+ >>> 1 + 1e-8 == approx(1, abs=1e-12)
624
+ False
625
+ >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
626
+ True
627
+
628
+ You can also use ``approx`` to compare nonnumeric types, or dicts and
629
+ sequences containing nonnumeric types, in which case it falls back to
630
+ strict equality. This can be useful for comparing dicts and sequences that
631
+ can contain optional values::
632
+
633
+ >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None})
634
+ True
635
+ >>> [None, 1.0000005] == approx([None, 1])
636
+ True
637
+ >>> ["foo", 1.0000005] == approx([None, 1])
638
+ False
639
+
640
+ If you're thinking about using ``approx``, then you might want to know how
641
+ it compares to other good ways of comparing floating-point numbers. All of
642
+ these algorithms are based on relative and absolute tolerances and should
643
+ agree for the most part, but they do have meaningful differences:
644
+
645
+ - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
646
+ tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
647
+ tolerance is met. Because the relative tolerance is calculated w.r.t.
648
+ both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
649
+ ``b`` is a "reference value"). You have to specify an absolute tolerance
650
+ if you want to compare to ``0.0`` because there is no tolerance by
651
+ default. More information: :py:func:`math.isclose`.
652
+
653
+ - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
654
+ between ``a`` and ``b`` is less than the sum of the relative tolerance
655
+ w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
656
+ is only calculated w.r.t. ``b``, this test is asymmetric and you can
657
+ think of ``b`` as the reference value. Support for comparing sequences
658
+ is provided by :py:func:`numpy.allclose`. More information:
659
+ :std:doc:`numpy:reference/generated/numpy.isclose`.
660
+
661
+ - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
662
+ are within an absolute tolerance of ``1e-7``. No relative tolerance is
663
+ considered , so this function is not appropriate for very large or very
664
+ small numbers. Also, it's only available in subclasses of ``unittest.TestCase``
665
+ and it's ugly because it doesn't follow PEP8. More information:
666
+ :py:meth:`unittest.TestCase.assertAlmostEqual`.
667
+
668
+ - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
669
+ tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
670
+ Because the relative tolerance is only calculated w.r.t. ``b``, this test
671
+ is asymmetric and you can think of ``b`` as the reference value. In the
672
+ special case that you explicitly specify an absolute tolerance but not a
673
+ relative tolerance, only the absolute tolerance is considered.
674
+
675
+ .. note::
676
+
677
+ ``approx`` can handle numpy arrays, but we recommend the
678
+ specialised test helpers in :std:doc:`numpy:reference/routines.testing`
679
+ if you need support for comparisons, NaNs, or ULP-based tolerances.
680
+
681
+ To match strings using regex, you can use
682
+ `Matches <https://github.com/asottile/re-assert#re_assertmatchespattern-str-args-kwargs>`_
683
+ from the
684
+ `re_assert package <https://github.com/asottile/re-assert>`_.
685
+
686
+ .. warning::
687
+
688
+ .. versionchanged:: 3.2
689
+
690
+ In order to avoid inconsistent behavior, :py:exc:`TypeError` is
691
+ raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
692
+ The example below illustrates the problem::
693
+
694
+ assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
695
+ assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)
696
+
697
+ In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
698
+ to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used for the
699
+ comparison. This is because the call hierarchy of rich comparisons
700
+ follows a fixed behavior. More information: :py:meth:`object.__ge__`
701
+
702
+ .. versionchanged:: 3.7.1
703
+ ``approx`` raises ``TypeError`` when it encounters a dict value or
704
+ sequence element of nonnumeric type.
705
+
706
+ .. versionchanged:: 6.1.0
707
+ ``approx`` falls back to strict equality for nonnumeric types instead
708
+ of raising ``TypeError``.
709
+ """
710
+ # Delegate the comparison to a class that knows how to deal with the type
711
+ # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
712
+ #
713
+ # The primary responsibility of these classes is to implement ``__eq__()``
714
+ # and ``__repr__()``. The former is used to actually check if some
715
+ # "actual" value is equivalent to the given expected value within the
716
+ # allowed tolerance. The latter is used to show the user the expected
717
+ # value and tolerance, in the case that a test failed.
718
+ #
719
+ # The actual logic for making approximate comparisons can be found in
720
+ # ApproxScalar, which is used to compare individual numbers. All of the
721
+ # other Approx classes eventually delegate to this class. The ApproxBase
722
+ # class provides some convenient methods and overloads, but isn't really
723
+ # essential.
724
+
725
+ __tracebackhide__ = True
726
+
727
+ if isinstance(expected, Decimal):
728
+ cls: type[ApproxBase] = ApproxDecimal
729
+ elif isinstance(expected, Mapping):
730
+ cls = ApproxMapping
731
+ elif _is_numpy_array(expected):
732
+ expected = _as_numpy_array(expected)
733
+ cls = ApproxNumpy
734
+ elif _is_sequence_like(expected):
735
+ cls = ApproxSequenceLike
736
+ elif isinstance(expected, Collection) and not isinstance(expected, (str, bytes)):
737
+ msg = f"pytest.approx() only supports ordered sequences, but got: {expected!r}"
738
+ raise TypeError(msg)
739
+ else:
740
+ cls = ApproxScalar
741
+
742
+ return cls(expected, rel, abs, nan_ok)
743
+
744
+
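The dispatch at the end of ``approx()`` above can be observed directly; an illustrative sketch (the class names are internal implementation details, not public API)::

    >>> type(approx(1.0)).__name__
    'ApproxScalar'
    >>> type(approx({'a': 1.0})).__name__
    'ApproxMapping'
    >>> type(approx([0.1, 0.2])).__name__
    'ApproxSequenceLike'
    >>> approx({1.0})
    Traceback (most recent call last):
        ...
    TypeError: pytest.approx() only supports ordered sequences, but got: {1.0}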
745
+ def _is_sequence_like(expected: object) -> bool:
746
+ return (
747
+ hasattr(expected, "__getitem__")
748
+ and isinstance(expected, Sized)
749
+ and not isinstance(expected, (str, bytes))
750
+ )
751
+
752
+
753
+ def _is_numpy_array(obj: object) -> bool:
754
+ """
755
+ Return true if the given object is implicitly convertible to ndarray,
756
+ and numpy is already imported.
757
+ """
758
+ return _as_numpy_array(obj) is not None
759
+
760
+
761
+ def _as_numpy_array(obj: object) -> ndarray | None:
762
+ """
763
+ Return an ndarray if the given object is implicitly convertible to ndarray,
764
+ and numpy is already imported, otherwise None.
765
+ """
766
+ import sys
767
+
768
+ np: Any = sys.modules.get("numpy")
769
+ if np is not None:
770
+ # avoid infinite recursion on numpy scalars, which have __array__
771
+ if np.isscalar(obj):
772
+ return None
773
+ elif isinstance(obj, np.ndarray):
774
+ return obj
775
+ elif hasattr(obj, "__array__") or hasattr(obj, "__array_interface__"):
776
+ return np.asarray(obj)
777
+ return None
778
+
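One design point worth noting about ``_as_numpy_array``: it looks numpy up in ``sys.modules`` rather than importing it, so ``approx()`` works without numpy installed and never triggers the import itself. Whatever the numpy state, a plain list is never treated as an array::

    >>> _is_numpy_array([1, 2, 3])
    False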
779
+
780
+ # builtin pytest.raises helper
781
+
782
+ E = TypeVar("E", bound=BaseException)
783
+
784
+
785
+ @overload
786
+ def raises(
787
+ expected_exception: type[E] | tuple[type[E], ...],
788
+ *,
789
+ match: str | Pattern[str] | None = ...,
790
+ ) -> RaisesContext[E]: ...
791
+
792
+
793
+ @overload
794
+ def raises(
795
+ expected_exception: type[E] | tuple[type[E], ...],
796
+ func: Callable[..., Any],
797
+ *args: Any,
798
+ **kwargs: Any,
799
+ ) -> _pytest._code.ExceptionInfo[E]: ...
800
+
801
+
802
+ def raises(
803
+ expected_exception: type[E] | tuple[type[E], ...], *args: Any, **kwargs: Any
804
+ ) -> RaisesContext[E] | _pytest._code.ExceptionInfo[E]:
805
+ r"""Assert that a code block/function call raises an exception type, or one of its subclasses.
806
+
807
+ :param expected_exception:
808
+ The expected exception type, or a tuple if one of multiple possible
809
+ exception types are expected. Note that subclasses of the passed exceptions
810
+ will also match.
811
+
812
+ :kwparam str | re.Pattern[str] | None match:
813
+ If specified, a string containing a regular expression,
814
+ or a regular expression object, that is tested against the string
815
+ representation of the exception and its :pep:`678` `__notes__`
816
+ using :func:`re.search`.
817
+
818
+ To match a literal string that may contain :ref:`special characters
819
+ <re-syntax>`, the pattern can first be escaped with :func:`re.escape`.
820
+
821
+ (This is only used when ``pytest.raises`` is used as a context manager,
822
+ and passed through to the function otherwise.
823
+ When using ``pytest.raises`` as a function, you can use:
824
+ ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)
825
+
826
+ Use ``pytest.raises`` as a context manager, which will capture the exception of the given
827
+ type, or any of its subclasses::
828
+
829
+ >>> import pytest
830
+ >>> with pytest.raises(ZeroDivisionError):
831
+ ... 1/0
832
+
833
+ If the code block does not raise the expected exception (:class:`ZeroDivisionError` in the example
834
+ above), or no exception at all, the check will fail instead.
835
+
836
+ You can also use the keyword argument ``match`` to assert that the
837
+ exception matches a text or regex::
838
+
839
+ >>> with pytest.raises(ValueError, match='must be 0 or None'):
840
+ ... raise ValueError("value must be 0 or None")
841
+
842
+ >>> with pytest.raises(ValueError, match=r'must be \d+$'):
843
+ ... raise ValueError("value must be 42")
844
+
845
+ The ``match`` argument searches the formatted exception string, which includes any
846
+ `PEP-678 <https://peps.python.org/pep-0678/>`__ ``__notes__``:
847
+
848
+ >>> with pytest.raises(ValueError, match=r"had a note added"): # doctest: +SKIP
849
+ ... e = ValueError("value must be 42")
850
+ ... e.add_note("had a note added")
851
+ ... raise e
852
+
853
+ The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
854
+ details of the captured exception::
855
+
856
+ >>> with pytest.raises(ValueError) as exc_info:
857
+ ... raise ValueError("value must be 42")
858
+ >>> assert exc_info.type is ValueError
859
+ >>> assert exc_info.value.args[0] == "value must be 42"
860
+
861
+ .. warning::
862
+
863
+ Given that ``pytest.raises`` matches subclasses, be wary of using it to match :class:`Exception` like this::
864
+
865
+ with pytest.raises(Exception): # Careful, this will catch ANY exception raised.
866
+ some_function()
867
+
868
+ Because :class:`Exception` is the base class of almost all exceptions, it is easy for this to hide
869
+ real bugs, where the user wrote this expecting a specific exception, but some other exception is being
870
+ raised due to a bug introduced during a refactoring.
871
+
872
+ Avoid using ``pytest.raises`` to catch :class:`Exception` unless certain that you really want to catch
873
+ **any** exception raised.
874
+
875
+ .. note::
876
+
877
+ When using ``pytest.raises`` as a context manager, it's worthwhile to
878
+ note that normal context manager rules apply and that the exception
879
+ raised *must* be the final line in the scope of the context manager.
880
+ Lines of code after that, within the scope of the context manager will
881
+ not be executed. For example::
882
+
883
+ >>> value = 15
884
+ >>> with pytest.raises(ValueError) as exc_info:
885
+ ... if value > 10:
886
+ ... raise ValueError("value must be <= 10")
887
+ ... assert exc_info.type is ValueError # This will not execute.
888
+
889
+ Instead, the following approach must be taken (note the difference in
890
+ scope)::
891
+
892
+ >>> with pytest.raises(ValueError) as exc_info:
893
+ ... if value > 10:
894
+ ... raise ValueError("value must be <= 10")
895
+ ...
896
+ >>> assert exc_info.type is ValueError
897
+
898
+ **Using with** ``pytest.mark.parametrize``
899
+
900
+ When using :ref:`pytest.mark.parametrize ref`
901
+ it is possible to parametrize tests such that
902
+ some runs raise an exception and others do not.
903
+
904
+ See :ref:`parametrizing_conditional_raising` for an example.
905
+
906
+ .. seealso::
907
+
908
+ :ref:`assertraises` for more examples and detailed discussion.
909
+
910
+ **Legacy form**
911
+
912
+ It is possible to specify a callable by passing a to-be-called lambda::
913
+
914
+ >>> raises(ZeroDivisionError, lambda: 1/0)
915
+ <ExceptionInfo ...>
916
+
917
+ or you can specify an arbitrary callable with arguments::
918
+
919
+ >>> def f(x): return 1/x
920
+ ...
921
+ >>> raises(ZeroDivisionError, f, 0)
922
+ <ExceptionInfo ...>
923
+ >>> raises(ZeroDivisionError, f, x=0)
924
+ <ExceptionInfo ...>
925
+
926
+ The form above is fully supported but discouraged for new code because the
927
+ context manager form is regarded as more readable and less error-prone.
928
+
929
+ .. note::
930
+ Similar to caught exception objects in Python, explicitly clearing
931
+ local references to returned ``ExceptionInfo`` objects can
932
+ help the Python interpreter speed up its garbage collection.
933
+
934
+ Clearing those references breaks a reference cycle
935
+ (``ExceptionInfo`` --> caught exception --> frame stack raising
936
+ the exception --> current frame stack --> local variables -->
937
+ ``ExceptionInfo``) which makes Python keep all objects referenced
938
+ from that cycle (including all local variables in the current
939
+ frame) alive until the next cyclic garbage collection run.
940
+ More detailed information can be found in the official Python
941
+ documentation for :ref:`the try statement <python:try>`.
942
+ """
943
+ __tracebackhide__ = True
944
+
945
+ if not expected_exception:
946
+ raise ValueError(
947
+ f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. "
948
+ f"Raising exceptions is already understood as failing the test, so you don't need "
949
+ f"any special code to say 'this should never raise an exception'."
950
+ )
951
+ if isinstance(expected_exception, type):
952
+ expected_exceptions: tuple[type[E], ...] = (expected_exception,)
953
+ else:
954
+ expected_exceptions = expected_exception
955
+ for exc in expected_exceptions:
956
+ if not isinstance(exc, type) or not issubclass(exc, BaseException):
957
+ msg = "expected exception must be a BaseException type, not {}" # type: ignore[unreachable]
958
+ not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__
959
+ raise TypeError(msg.format(not_a))
960
+
961
+ message = f"DID NOT RAISE {expected_exception}"
962
+
963
+ if not args:
964
+ match: str | Pattern[str] | None = kwargs.pop("match", None)
965
+ if kwargs:
966
+ msg = "Unexpected keyword arguments passed to pytest.raises: "
967
+ msg += ", ".join(sorted(kwargs))
968
+ msg += "\nUse context-manager form instead?"
969
+ raise TypeError(msg)
970
+ return RaisesContext(expected_exception, message, match)
971
+ else:
972
+ func = args[0]
973
+ if not callable(func):
974
+ raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
975
+ try:
976
+ func(*args[1:], **kwargs)
977
+ except expected_exception as e:
978
+ return _pytest._code.ExceptionInfo.from_exception(e)
979
+ fail(message)
980
+
981
+
982
+ # This doesn't work with mypy for now. Use fail.Exception instead.
983
+ raises.Exception = fail.Exception # type: ignore
984
+
985
+
986
+ @final
987
+ class RaisesContext(ContextManager[_pytest._code.ExceptionInfo[E]]):
988
+ def __init__(
989
+ self,
990
+ expected_exception: type[E] | tuple[type[E], ...],
991
+ message: str,
992
+ match_expr: str | Pattern[str] | None = None,
993
+ ) -> None:
994
+ self.expected_exception = expected_exception
995
+ self.message = message
996
+ self.match_expr = match_expr
997
+ self.excinfo: _pytest._code.ExceptionInfo[E] | None = None
998
+ if self.match_expr is not None:
999
+ re_error = None
1000
+ try:
1001
+ re.compile(self.match_expr)
1002
+ except re.error as e:
1003
+ re_error = e
1004
+ if re_error is not None:
1005
+ fail(f"Invalid regex pattern provided to 'match': {re_error}")
1006
+
1007
+ def __enter__(self) -> _pytest._code.ExceptionInfo[E]:
1008
+ self.excinfo = _pytest._code.ExceptionInfo.for_later()
1009
+ return self.excinfo
1010
+
1011
+ def __exit__(
1012
+ self,
1013
+ exc_type: type[BaseException] | None,
1014
+ exc_val: BaseException | None,
1015
+ exc_tb: TracebackType | None,
1016
+ ) -> bool:
1017
+ __tracebackhide__ = True
1018
+ if exc_type is None:
1019
+ fail(self.message)
1020
+ assert self.excinfo is not None
1021
+ if not issubclass(exc_type, self.expected_exception):
1022
+ return False
1023
+ # Cast to narrow the exception type now that it's verified.
1024
+ exc_info = cast(Tuple[Type[E], E, TracebackType], (exc_type, exc_val, exc_tb))
1025
+ self.excinfo.fill_unfilled(exc_info)
1026
+ if self.match_expr is not None:
1027
+ self.excinfo.match(self.match_expr)
1028
+ return True
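Because ``RaisesContext.__exit__`` above returns ``False`` for a non-matching exception type, an unexpected exception propagates out of the ``with`` block rather than being swallowed or turned into a "DID NOT RAISE" failure; an illustrative sketch::

    >>> import pytest
    >>> with pytest.raises(KeyError):
    ...     raise ValueError("not a KeyError")
    Traceback (most recent call last):
        ...
    ValueError: not a KeyError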
.venv/lib/python3.11/site-packages/_pytest/recwarn.py ADDED
@@ -0,0 +1,364 @@
1
+ # mypy: allow-untyped-defs
2
+ """Record warnings during test function execution."""
3
+
4
+ from __future__ import annotations
5
+
6
+ from pprint import pformat
7
+ import re
8
+ from types import TracebackType
9
+ from typing import Any
10
+ from typing import Callable
11
+ from typing import final
12
+ from typing import Generator
13
+ from typing import Iterator
14
+ from typing import overload
15
+ from typing import Pattern
16
+ from typing import TYPE_CHECKING
17
+ from typing import TypeVar
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from typing_extensions import Self
22
+
23
+ import warnings
24
+
25
+ from _pytest.deprecated import check_ispytest
26
+ from _pytest.fixtures import fixture
27
+ from _pytest.outcomes import Exit
28
+ from _pytest.outcomes import fail
29
+
30
+
31
+ T = TypeVar("T")
32
+
33
+
34
+ @fixture
35
+ def recwarn() -> Generator[WarningsRecorder]:
36
+ """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions.
37
+
38
+ See :ref:`warnings` for information on warning categories.
39
+ """
40
+ wrec = WarningsRecorder(_ispytest=True)
41
+ with wrec:
42
+ warnings.simplefilter("default")
43
+ yield wrec
44
+
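A minimal usage sketch of the ``recwarn`` fixture above (the test name and the warned message are hypothetical)::

    import warnings

    def test_deprecation(recwarn):
        warnings.warn("use new_api() instead", DeprecationWarning)
        assert len(recwarn) == 1
        w = recwarn.pop(DeprecationWarning)
        assert issubclass(w.category, DeprecationWarning)
        assert "new_api" in str(w.message)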
45
+
46
+ @overload
47
+ def deprecated_call(*, match: str | Pattern[str] | None = ...) -> WarningsRecorder: ...
48
+
49
+
50
+ @overload
51
+ def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T: ...
52
+
53
+
54
+ def deprecated_call(
55
+ func: Callable[..., Any] | None = None, *args: Any, **kwargs: Any
56
+ ) -> WarningsRecorder | Any:
57
+ """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning`` or ``FutureWarning``.
58
+
59
+ This function can be used as a context manager::
60
+
61
+ >>> import warnings
62
+ >>> def api_call_v2():
63
+ ... warnings.warn('use v3 of this api', DeprecationWarning)
64
+ ... return 200
65
+
66
+ >>> import pytest
67
+ >>> with pytest.deprecated_call():
68
+ ... assert api_call_v2() == 200
69
+
70
+ It can also be used by passing a function and ``*args`` and ``**kwargs``,
71
+ in which case it will ensure calling ``func(*args, **kwargs)`` produces one of
72
+ the warnings types above. The return value is the return value of the function.
73
+
74
+ In the context manager form you may use the keyword argument ``match`` to assert
75
+ that the warning matches a text or regex.
76
+
77
+ The context manager produces a list of :class:`warnings.WarningMessage` objects,
78
+ one for each warning raised.
79
+ """
80
+ __tracebackhide__ = True
81
+ if func is not None:
82
+ args = (func, *args)
83
+ return warns(
84
+ (DeprecationWarning, PendingDeprecationWarning, FutureWarning), *args, **kwargs
85
+ )
86
+
87
+
88
+ @overload
89
+ def warns(
90
+ expected_warning: type[Warning] | tuple[type[Warning], ...] = ...,
91
+ *,
92
+ match: str | Pattern[str] | None = ...,
93
+ ) -> WarningsChecker: ...
94
+
95
+
96
+ @overload
97
+ def warns(
98
+ expected_warning: type[Warning] | tuple[type[Warning], ...],
99
+ func: Callable[..., T],
100
+ *args: Any,
101
+ **kwargs: Any,
102
+ ) -> T: ...
103
+
104
+
105
+ def warns(
106
+ expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning,
107
+ *args: Any,
108
+ match: str | Pattern[str] | None = None,
109
+ **kwargs: Any,
110
+ ) -> WarningsChecker | Any:
111
+ r"""Assert that code raises a particular class of warning.
112
+
113
+ Specifically, the parameter ``expected_warning`` can be a warning class or tuple
114
+ of warning classes, and the code inside the ``with`` block must issue at least one
115
+ warning of that class or classes.
116
+
117
+ This helper produces a list of :class:`warnings.WarningMessage` objects, one for
118
+ each warning emitted (regardless of whether it is an ``expected_warning`` or not).
119
+ Since pytest 8.0, unmatched warnings are also re-emitted when the context closes.
120
+
121
+ This function can be used as a context manager::
122
+
123
+ >>> import pytest
124
+ >>> with pytest.warns(RuntimeWarning):
125
+ ... warnings.warn("my warning", RuntimeWarning)
126
+
127
+ In the context manager form you may use the keyword argument ``match`` to assert
128
+ that the warning matches a text or regex::
129
+
130
+ >>> with pytest.warns(UserWarning, match='must be 0 or None'):
131
+ ... warnings.warn("value must be 0 or None", UserWarning)
132
+
133
+ >>> with pytest.warns(UserWarning, match=r'must be \d+$'):
134
+ ... warnings.warn("value must be 42", UserWarning)
135
+
136
+ >>> with pytest.warns(UserWarning): # catch re-emitted warning
137
+ ... with pytest.warns(UserWarning, match=r'must be \d+$'):
138
+ ... warnings.warn("this is not here", UserWarning)
139
+ Traceback (most recent call last):
140
+ ...
141
+ Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted...
142
+
143
+ **Using with** ``pytest.mark.parametrize``
144
+
145
+ When using :ref:`pytest.mark.parametrize ref` it is possible to parametrize tests
146
+ such that some runs raise a warning and others do not.
147
+
148
+ This could be achieved in the same way as with exceptions, see
149
+ :ref:`parametrizing_conditional_raising` for an example.
150
+
151
+ """
152
+ __tracebackhide__ = True
153
+ if not args:
154
+ if kwargs:
155
+ argnames = ", ".join(sorted(kwargs))
156
+ raise TypeError(
157
+ f"Unexpected keyword arguments passed to pytest.warns: {argnames}"
158
+ "\nUse context-manager form instead?"
159
+ )
160
+ return WarningsChecker(expected_warning, match_expr=match, _ispytest=True)
161
+ else:
162
+ func = args[0]
163
+ if not callable(func):
164
+ raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
165
+ with WarningsChecker(expected_warning, _ispytest=True):
166
+ return func(*args[1:], **kwargs)
167
+
168
+
169
+ class WarningsRecorder(warnings.catch_warnings): # type:ignore[type-arg]
170
+ """A context manager to record raised warnings.
171
+
172
+ Each recorded warning is an instance of :class:`warnings.WarningMessage`.
173
+
174
+ Adapted from `warnings.catch_warnings`.
175
+
176
+ .. note::
177
+ ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
178
+ differently; see :ref:`ensuring_function_triggers`.
179
+
180
+ """
181
+
182
+ def __init__(self, *, _ispytest: bool = False) -> None:
183
+ check_ispytest(_ispytest)
184
+ super().__init__(record=True)
185
+ self._entered = False
186
+ self._list: list[warnings.WarningMessage] = []
187
+
188
+ @property
189
+ def list(self) -> list[warnings.WarningMessage]:
190
+ """The list of recorded warnings."""
191
+ return self._list
192
+
193
+ def __getitem__(self, i: int) -> warnings.WarningMessage:
194
+ """Get a recorded warning by index."""
195
+ return self._list[i]
196
+
197
+ def __iter__(self) -> Iterator[warnings.WarningMessage]:
198
+ """Iterate through the recorded warnings."""
199
+ return iter(self._list)
200
+
201
+ def __len__(self) -> int:
202
+ """The number of recorded warnings."""
203
+ return len(self._list)
204
+
205
+ def pop(self, cls: type[Warning] = Warning) -> warnings.WarningMessage:
206
+ """Pop the first recorded warning which is an instance of ``cls``,
207
+ but not an instance of a child class of any other match.
208
+ Raises ``AssertionError`` if there is no match.
209
+ """
210
+ best_idx: int | None = None
211
+ for i, w in enumerate(self._list):
212
+ if w.category == cls:
213
+ return self._list.pop(i) # exact match, stop looking
214
+ if issubclass(w.category, cls) and (
215
+ best_idx is None
216
+ or not issubclass(w.category, self._list[best_idx].category)
217
+ ):
218
+ best_idx = i
219
+ if best_idx is not None:
220
+ return self._list.pop(best_idx)
221
+ __tracebackhide__ = True
222
+ raise AssertionError(f"{cls!r} not found in warning list")
223
+
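The selection logic in ``pop()`` above prefers an exact category match over a subclass recorded earlier; a sketch, for illustration only since the class is internal (``MyDeprecation`` is a hypothetical subclass)::

    >>> import warnings
    >>> class MyDeprecation(DeprecationWarning): ...
    >>> with WarningsRecorder(_ispytest=True) as rec:
    ...     warnings.warn("subclass first", MyDeprecation)
    ...     warnings.warn("exact second", DeprecationWarning)
    >>> rec.pop(DeprecationWarning).category    # exact match wins over the subclass
    <class 'DeprecationWarning'>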
224
+ def clear(self) -> None:
225
+ """Clear the list of recorded warnings."""
226
+ self._list[:] = []
227
+
228
+ def __enter__(self) -> Self:
229
+ if self._entered:
230
+ __tracebackhide__ = True
231
+ raise RuntimeError(f"Cannot enter {self!r} twice")
232
+ _list = super().__enter__()
233
+ # record=True means it's not None.
234
+ assert _list is not None
235
+ self._list = _list
236
+ warnings.simplefilter("always")
237
+ return self
238
+
239
+ def __exit__(
240
+ self,
241
+ exc_type: type[BaseException] | None,
242
+ exc_val: BaseException | None,
243
+ exc_tb: TracebackType | None,
244
+ ) -> None:
245
+ if not self._entered:
246
+ __tracebackhide__ = True
247
+ raise RuntimeError(f"Cannot exit {self!r} without entering first")
248
+
249
+ super().__exit__(exc_type, exc_val, exc_tb)
250
+
251
+ # Built-in catch_warnings does not reset entered state so we do it
252
+ # manually here for this context manager to become reusable.
253
+ self._entered = False
254
+
255
+
256
+ @final
257
+ class WarningsChecker(WarningsRecorder):
258
+ def __init__(
259
+ self,
260
+ expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning,
261
+ match_expr: str | Pattern[str] | None = None,
262
+ *,
263
+ _ispytest: bool = False,
264
+ ) -> None:
265
+ check_ispytest(_ispytest)
266
+ super().__init__(_ispytest=True)
267
+
268
+ msg = "exceptions must be derived from Warning, not %s"
269
+ if isinstance(expected_warning, tuple):
270
+ for exc in expected_warning:
271
+ if not issubclass(exc, Warning):
272
+ raise TypeError(msg % type(exc))
273
+ expected_warning_tup = expected_warning
274
+ elif isinstance(expected_warning, type) and issubclass(
275
+ expected_warning, Warning
276
+ ):
277
+ expected_warning_tup = (expected_warning,)
278
+ else:
279
+ raise TypeError(msg % type(expected_warning))
280
+
281
+ self.expected_warning = expected_warning_tup
282
+ self.match_expr = match_expr
283
+
284
+ def matches(self, warning: warnings.WarningMessage) -> bool:
285
+ assert self.expected_warning is not None
286
+ return issubclass(warning.category, self.expected_warning) and bool(
287
+ self.match_expr is None or re.search(self.match_expr, str(warning.message))
288
+ )
289
+
290
+ def __exit__(
291
+ self,
292
+ exc_type: type[BaseException] | None,
293
+ exc_val: BaseException | None,
294
+ exc_tb: TracebackType | None,
295
+ ) -> None:
296
+ super().__exit__(exc_type, exc_val, exc_tb)
297
+
298
+ __tracebackhide__ = True
299
+
300
+ # BaseExceptions like pytest.{skip,fail,xfail,exit} or Ctrl-C within
301
+ # pytest.warns should *not* trigger "DID NOT WARN" and get suppressed
302
+ # when the warning doesn't happen. Control-flow exceptions should always
303
+ # propagate.
304
+ if exc_val is not None and (
305
+ not isinstance(exc_val, Exception)
306
+ # Exit is an Exception, not a BaseException, for some reason.
307
+ or isinstance(exc_val, Exit)
308
+ ):
309
+ return
310
+
311
+ def found_str() -> str:
312
+ return pformat([record.message for record in self], indent=2)
313
+
314
+ try:
315
+ if not any(issubclass(w.category, self.expected_warning) for w in self):
316
+ fail(
317
+ f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n"
318
+ f" Emitted warnings: {found_str()}."
319
+ )
320
+ elif not any(self.matches(w) for w in self):
321
+ fail(
322
+ f"DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted.\n"
323
+ f" Regex: {self.match_expr}\n"
324
+ f" Emitted warnings: {found_str()}."
325
+ )
326
+ finally:
327
+ # Whether or not any warnings matched, we want to re-emit all unmatched warnings.
328
+ for w in self:
329
+ if not self.matches(w):
330
+ warnings.warn_explicit(
331
+ message=w.message,
332
+ category=w.category,
333
+ filename=w.filename,
334
+ lineno=w.lineno,
335
+ module=w.__module__,
336
+ source=w.source,
337
+ )
338
+
339
+ # Currently in Python it is possible to pass other types than an
340
+ # `str` message when creating `Warning` instances, however this
341
+ # causes an exception when :func:`warnings.filterwarnings` is used
342
+ # to filter those warnings. See
343
+ # https://github.com/python/cpython/issues/103577 for a discussion.
344
+ # While this can be considered a bug in CPython, we put guards in
345
+ # pytest as the error message produced without this check in place
346
+ # is confusing (#10865).
347
+ for w in self:
348
+ if type(w.message) is not UserWarning:
349
+ # If the warning was of an incorrect type then `warnings.warn()`
350
+ # creates a UserWarning. Any other warning must have been specified
351
+ # explicitly.
352
+ continue
353
+ if not w.message.args:
354
+ # UserWarning() without arguments must have been specified explicitly.
355
+ continue
356
+ msg = w.message.args[0]
357
+ if isinstance(msg, str):
358
+ continue
359
+ # It's possible that UserWarning was explicitly specified, and
360
+ # its first argument was not a string. But that case can't be
361
+ # distinguished from an invalid type.
362
+ raise TypeError(
363
+ f"Warning must be str or Warning, got {msg!r} (type {type(msg).__name__})"
364
+ )
.venv/lib/python3.11/site-packages/_pytest/tmpdir.py ADDED
@@ -0,0 +1,314 @@
1
+ # mypy: allow-untyped-defs
2
+ """Support for providing temporary directories to test functions."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import dataclasses
7
+ import os
8
+ from pathlib import Path
9
+ import re
10
+ from shutil import rmtree
11
+ import tempfile
12
+ from typing import Any
13
+ from typing import Dict
14
+ from typing import final
15
+ from typing import Generator
16
+ from typing import Literal
17
+
18
+ from .pathlib import cleanup_dead_symlinks
19
+ from .pathlib import LOCK_TIMEOUT
20
+ from .pathlib import make_numbered_dir
21
+ from .pathlib import make_numbered_dir_with_cleanup
22
+ from .pathlib import rm_rf
23
+ from _pytest.compat import get_user_id
24
+ from _pytest.config import Config
25
+ from _pytest.config import ExitCode
26
+ from _pytest.config import hookimpl
27
+ from _pytest.config.argparsing import Parser
28
+ from _pytest.deprecated import check_ispytest
29
+ from _pytest.fixtures import fixture
30
+ from _pytest.fixtures import FixtureRequest
31
+ from _pytest.monkeypatch import MonkeyPatch
32
+ from _pytest.nodes import Item
33
+ from _pytest.reports import TestReport
34
+ from _pytest.stash import StashKey
35
+
36
+
37
+ tmppath_result_key = StashKey[Dict[str, bool]]()
38
+ RetentionType = Literal["all", "failed", "none"]
39
+
40
+
41
+ @final
42
+ @dataclasses.dataclass
43
+ class TempPathFactory:
44
+ """Factory for temporary directories under the common base temp directory,
45
+ as discussed at :ref:`temporary directory location and retention`.
46
+ """
47
+
48
+ _given_basetemp: Path | None
49
+ # pluggy TagTracerSub, not currently exposed, so Any.
50
+ _trace: Any
51
+ _basetemp: Path | None
52
+ _retention_count: int
53
+ _retention_policy: RetentionType
54
+
55
+ def __init__(
56
+ self,
57
+ given_basetemp: Path | None,
58
+ retention_count: int,
59
+ retention_policy: RetentionType,
60
+ trace,
61
+ basetemp: Path | None = None,
62
+ *,
63
+ _ispytest: bool = False,
64
+ ) -> None:
65
+ check_ispytest(_ispytest)
66
+ if given_basetemp is None:
67
+ self._given_basetemp = None
68
+ else:
69
+ # Use os.path.abspath() to get absolute path instead of resolve() as it
70
+ # does not work the same in all platforms (see #4427).
71
+ # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012).
72
+ self._given_basetemp = Path(os.path.abspath(str(given_basetemp)))
73
+ self._trace = trace
74
+ self._retention_count = retention_count
75
+ self._retention_policy = retention_policy
76
+ self._basetemp = basetemp
77
+
78
+ @classmethod
79
+ def from_config(
80
+ cls,
81
+ config: Config,
82
+ *,
83
+ _ispytest: bool = False,
84
+ ) -> TempPathFactory:
85
+ """Create a factory according to pytest configuration.
86
+
87
+ :meta private:
88
+ """
89
+ check_ispytest(_ispytest)
90
+ count = int(config.getini("tmp_path_retention_count"))
91
+ if count < 0:
92
+ raise ValueError(
93
+ f"tmp_path_retention_count must be >= 0. Current input: {count}."
94
+ )
95
+
96
+ policy = config.getini("tmp_path_retention_policy")
97
+ if policy not in ("all", "failed", "none"):
98
+ raise ValueError(
99
+ f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}."
100
+ )
101
+
102
+ return cls(
103
+ given_basetemp=config.option.basetemp,
104
+ trace=config.trace.get("tmpdir"),
105
+ retention_count=count,
106
+ retention_policy=policy,
107
+ _ispytest=True,
108
+ )
109
+
110
+ def _ensure_relative_to_basetemp(self, basename: str) -> str:
111
+ basename = os.path.normpath(basename)
112
+ if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp():
113
+ raise ValueError(f"{basename} is not a normalized and relative path")
114
+ return basename
115
+
116
+ def mktemp(self, basename: str, numbered: bool = True) -> Path:
117
+ """Create a new temporary directory managed by the factory.
118
+
119
+ :param basename:
120
+ Directory base name, must be a relative path.
121
+
122
+ :param numbered:
123
+ If ``True``, ensure the directory is unique by adding a numbered
124
+ suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True``
125
+ means that this function will create directories named ``"foo-0"``,
126
+ ``"foo-1"``, ``"foo-2"`` and so on.
127
+
128
+ :returns:
129
+ The path to the new directory.
130
+ """
131
+ basename = self._ensure_relative_to_basetemp(basename)
132
+ if not numbered:
133
+ p = self.getbasetemp().joinpath(basename)
134
+ p.mkdir(mode=0o700)
135
+ else:
136
+ p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700)
137
+ self._trace("mktemp", p)
138
+ return p
139
+
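A usage sketch for ``mktemp`` through the session-scoped ``tmp_path_factory`` fixture defined further down in this file (test and file names are hypothetical)::

    def test_shared_dataset(tmp_path_factory):
        d = tmp_path_factory.mktemp("data")   # numbered: data0, data1, ... under the basetemp
        f = d / "sample.txt"
        f.write_text("hello")
        assert f.read_text() == "hello"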
140
+ def getbasetemp(self) -> Path:
141
+ """Return the base temporary directory, creating it if needed.
142
+
143
+ :returns:
144
+ The base temporary directory.
145
+ """
146
+ if self._basetemp is not None:
147
+ return self._basetemp
148
+
149
+ if self._given_basetemp is not None:
150
+ basetemp = self._given_basetemp
151
+ if basetemp.exists():
152
+ rm_rf(basetemp)
153
+ basetemp.mkdir(mode=0o700)
154
+ basetemp = basetemp.resolve()
155
+ else:
156
+ from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
157
+ temproot = Path(from_env or tempfile.gettempdir()).resolve()
158
+ user = get_user() or "unknown"
159
+ # use a sub-directory in the temproot to speed-up
160
+ # make_numbered_dir() call
161
+ rootdir = temproot.joinpath(f"pytest-of-{user}")
162
+ try:
163
+ rootdir.mkdir(mode=0o700, exist_ok=True)
164
+ except OSError:
165
+ # getuser() likely returned illegal characters for the platform; fall back to the "unknown" directory
166
+ rootdir = temproot.joinpath("pytest-of-unknown")
167
+ rootdir.mkdir(mode=0o700, exist_ok=True)
168
+ # Because we use exist_ok=True with a predictable name, make sure
169
+ # we are the owners, to prevent any funny business (on unix, where
170
+ # temproot is usually shared).
171
+ # Also, to keep things private, fixup any world-readable temp
172
+ # rootdir's permissions. Historically 0o755 was used, so we can't
173
+ # just error out on this, at least for a while.
174
+ uid = get_user_id()
175
+ if uid is not None:
176
+ rootdir_stat = rootdir.stat()
177
+ if rootdir_stat.st_uid != uid:
178
+ raise OSError(
179
+ f"The temporary directory {rootdir} is not owned by the current user. "
180
+ "Fix this and try again."
181
+ )
182
+ if (rootdir_stat.st_mode & 0o077) != 0:
183
+ os.chmod(rootdir, rootdir_stat.st_mode & ~0o077)
184
+ keep = self._retention_count
185
+ if self._retention_policy == "none":
186
+ keep = 0
187
+ basetemp = make_numbered_dir_with_cleanup(
188
+ prefix="pytest-",
189
+ root=rootdir,
190
+ keep=keep,
191
+ lock_timeout=LOCK_TIMEOUT,
192
+ mode=0o700,
193
+ )
194
+ assert basetemp is not None, basetemp
195
+ self._basetemp = basetemp
196
+ self._trace("new basetemp", basetemp)
197
+ return basetemp
198
+
199
+
200
+ def get_user() -> str | None:
201
+ """Return the current user name, or None if getuser() does not work
202
+ in the current environment (see #1010)."""
203
+ try:
204
+ # In some exotic environments, getpass may not be importable.
205
+ import getpass
206
+
207
+ return getpass.getuser()
208
+ except (ImportError, OSError, KeyError):
209
+ return None
210
+
211
+
212
+ def pytest_configure(config: Config) -> None:
213
+ """Create a TempPathFactory and attach it to the config object.
214
+
215
+ This is to comply with existing plugins which expect the handler to be
216
+ available at pytest_configure time, but ideally should be moved entirely
217
+ to the tmp_path_factory session fixture.
218
+ """
219
+ mp = MonkeyPatch()
220
+ config.add_cleanup(mp.undo)
221
+ _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True)
222
+ mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False)
223
+
224
+
225
+ def pytest_addoption(parser: Parser) -> None:
226
+ parser.addini(
227
+ "tmp_path_retention_count",
228
+ help="How many sessions should we keep the `tmp_path` directories, according to `tmp_path_retention_policy`.",
229
+ default=3,
230
+ )
231
+
232
+ parser.addini(
233
+ "tmp_path_retention_policy",
234
+ help="Controls which directories created by the `tmp_path` fixture are kept around, based on test outcome. "
235
+ "(all/failed/none)",
236
+ default="all",
237
+ )
238
+
239
+
240
+ @fixture(scope="session")
241
+ def tmp_path_factory(request: FixtureRequest) -> TempPathFactory:
242
+ """Return a :class:`pytest.TempPathFactory` instance for the test session."""
243
+ # Set dynamically by pytest_configure() above.
244
+ return request.config._tmp_path_factory # type: ignore
245
+
246
+
247
+ def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path:
248
+ name = request.node.name
249
+ name = re.sub(r"[\W]", "_", name)
250
+ MAXVAL = 30
251
+ name = name[:MAXVAL]
252
+ return factory.mktemp(name, numbered=True)
253
+
254
+
255
+ @fixture
256
+ def tmp_path(
257
+ request: FixtureRequest, tmp_path_factory: TempPathFactory
258
+ ) -> Generator[Path]:
259
+ """Return a temporary directory (as :class:`pathlib.Path` object)
260
+ which is unique to each test function invocation.
261
+ The temporary directory is created as a subdirectory
262
+ of the base temporary directory, with configurable retention,
263
+ as discussed in :ref:`temporary directory location and retention`.
264
+ """
265
+ path = _mk_tmp(request, tmp_path_factory)
266
+ yield path
267
+
268
+ # Remove the tmpdir if the policy is "failed" and the test passed.
269
+ tmp_path_factory: TempPathFactory = request.session.config._tmp_path_factory # type: ignore
270
+ policy = tmp_path_factory._retention_policy
271
+ result_dict = request.node.stash[tmppath_result_key]
272
+
273
+ if policy == "failed" and result_dict.get("call", True):
274
+ # We do a "best effort" to remove files, but it might not be possible due to some leaked resource,
275
+ # permissions, etc, in which case we ignore it.
276
+ rmtree(path, ignore_errors=True)
277
+
278
+ del request.node.stash[tmppath_result_key]
279
+
280
+
281
+ def pytest_sessionfinish(session, exitstatus: int | ExitCode):
282
+ """After each session, remove base directory if all the tests passed,
283
+ the policy is "failed", and the basetemp is not specified by a user.
284
+ """
285
+ tmp_path_factory: TempPathFactory = session.config._tmp_path_factory
286
+ basetemp = tmp_path_factory._basetemp
287
+ if basetemp is None:
288
+ return
289
+
290
+ policy = tmp_path_factory._retention_policy
291
+ if (
292
+ exitstatus == 0
293
+ and policy == "failed"
294
+ and tmp_path_factory._given_basetemp is None
295
+ ):
296
+ if basetemp.is_dir():
297
+ # We do a "best effort" to remove files, but it might not be possible due to some leaked resource,
298
+ # permissions, etc, in which case we ignore it.
299
+ rmtree(basetemp, ignore_errors=True)
300
+
301
+ # Remove dead symlinks.
302
+ if basetemp.is_dir():
303
+ cleanup_dead_symlinks(basetemp)
304
+
305
+
306
+ @hookimpl(wrapper=True, tryfirst=True)
307
+ def pytest_runtest_makereport(
308
+ item: Item, call
309
+ ) -> Generator[None, TestReport, TestReport]:
310
+ rep = yield
311
+ assert rep.when is not None
312
+ empty: dict[str, bool] = {}
313
+ item.stash.setdefault(tmppath_result_key, empty)[rep.when] = rep.passed
314
+ return rep
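Taken together, this module wires the two retention ini options into the `tmp_path` machinery: the factory decides how many `pytest-N` base directories survive across sessions, and the fixture finalizer prunes per-test directories under the `failed` policy. A minimal sketch of how a test suite consumes these pieces (the ini values and test bodies below are illustrative, not part of the vendored file):

```python
# pytest.ini (illustrative values):
#   [pytest]
#   tmp_path_retention_count = 3
#   tmp_path_retention_policy = failed


def test_write_report(tmp_path):
    # tmp_path is a fresh pathlib.Path below the session basetemp;
    # under policy "failed" it is removed again when this test passes.
    out = tmp_path / "report.txt"
    out.write_text("ok")
    assert out.read_text() == "ok"


def test_shared_dir(tmp_path_factory):
    # The session-scoped factory hands out numbered directories.
    shared = tmp_path_factory.mktemp("data")  # e.g. .../data0
    assert shared.is_dir()
```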
.venv/lib/python3.11/site-packages/_pytest/unittest.py ADDED
@@ -0,0 +1,435 @@
+# mypy: allow-untyped-defs
+"""Discover and run std-library "unittest" style tests."""
+
+from __future__ import annotations
+
+import inspect
+import sys
+import traceback
+import types
+from typing import Any
+from typing import Callable
+from typing import Generator
+from typing import Iterable
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import Union
+
+import _pytest._code
+from _pytest.compat import is_async_function
+from _pytest.config import hookimpl
+from _pytest.fixtures import FixtureRequest
+from _pytest.nodes import Collector
+from _pytest.nodes import Item
+from _pytest.outcomes import exit
+from _pytest.outcomes import fail
+from _pytest.outcomes import skip
+from _pytest.outcomes import xfail
+from _pytest.python import Class
+from _pytest.python import Function
+from _pytest.python import Module
+from _pytest.runner import CallInfo
+import pytest
+
+
+if sys.version_info[:2] < (3, 11):
+    from exceptiongroup import ExceptionGroup
+
+if TYPE_CHECKING:
+    import unittest
+
+    import twisted.trial.unittest
+
+
+_SysExcInfoType = Union[
+    Tuple[Type[BaseException], BaseException, types.TracebackType],
+    Tuple[None, None, None],
+]
+
+
+def pytest_pycollect_makeitem(
+    collector: Module | Class, name: str, obj: object
+) -> UnitTestCase | None:
+    try:
+        # Has unittest been imported?
+        ut = sys.modules["unittest"]
+        # Is obj a subclass of unittest.TestCase?
+        # Type ignored because `ut` is an opaque module.
+        if not issubclass(obj, ut.TestCase):  # type: ignore
+            return None
+    except Exception:
+        return None
+    # Is obj a concrete class?
+    # Abstract classes can't be instantiated so no point collecting them.
+    if inspect.isabstract(obj):
+        return None
+    # Yes, so let's collect it.
+    return UnitTestCase.from_parent(collector, name=name, obj=obj)
+
+
+class UnitTestCase(Class):
+    # Marker for fixturemanager.getfixtureinfo()
+    # to declare that our children do not support funcargs.
+    nofuncargs = True
+
+    def newinstance(self):
+        # TestCase __init__ takes the method (test) name. The TestCase
+        # constructor treats the name "runTest" as a special no-op, so it can be
+        # used when a dummy instance is needed. While unittest.TestCase has a
+        # default, some subclasses omit the default (#9610), so always supply
+        # it.
+        return self.obj("runTest")
+
+    def collect(self) -> Iterable[Item | Collector]:
+        from unittest import TestLoader
+
+        cls = self.obj
+        if not getattr(cls, "__test__", True):
+            return
+
+        skipped = _is_skipped(cls)
+        if not skipped:
+            self._register_unittest_setup_method_fixture(cls)
+            self._register_unittest_setup_class_fixture(cls)
+            self._register_setup_class_fixture()
+
+        self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
+
+        loader = TestLoader()
+        foundsomething = False
+        for name in loader.getTestCaseNames(self.obj):
+            x = getattr(self.obj, name)
+            if not getattr(x, "__test__", True):
+                continue
+            yield TestCaseFunction.from_parent(self, name=name)
+            foundsomething = True
+
+        if not foundsomething:
+            runtest = getattr(self.obj, "runTest", None)
+            if runtest is not None:
+                ut = sys.modules.get("twisted.trial.unittest", None)
+                if ut is None or runtest != ut.TestCase.runTest:
+                    yield TestCaseFunction.from_parent(self, name="runTest")
+
+    def _register_unittest_setup_class_fixture(self, cls: type) -> None:
+        """Register an auto-use fixture to invoke setUpClass and
+        tearDownClass (#517)."""
+        setup = getattr(cls, "setUpClass", None)
+        teardown = getattr(cls, "tearDownClass", None)
+        if setup is None and teardown is None:
+            return None
+        cleanup = getattr(cls, "doClassCleanups", lambda: None)
+
+        def process_teardown_exceptions() -> None:
+            # tearDown_exceptions is a list set in the class containing exc_infos for errors during
+            # teardown for the class.
+            exc_infos = getattr(cls, "tearDown_exceptions", None)
+            if not exc_infos:
+                return
+            exceptions = [exc for (_, exc, _) in exc_infos]
+            # If a single exception, raise it directly as this provides a more readable
+            # error (hopefully this will improve in #12255).
+            if len(exceptions) == 1:
+                raise exceptions[0]
+            else:
+                raise ExceptionGroup("Unittest class cleanup errors", exceptions)
+
+        def unittest_setup_class_fixture(
+            request: FixtureRequest,
+        ) -> Generator[None]:
+            cls = request.cls
+            if _is_skipped(cls):
+                reason = cls.__unittest_skip_why__
+                raise pytest.skip.Exception(reason, _use_item_location=True)
+            if setup is not None:
+                try:
+                    setup()
+                # unittest does not call the cleanup function for every BaseException, so we
+                # follow this here.
+                except Exception:
+                    cleanup()
+                    process_teardown_exceptions()
+                    raise
+            yield
+            try:
+                if teardown is not None:
+                    teardown()
+            finally:
+                cleanup()
+                process_teardown_exceptions()
+
+        self.session._fixturemanager._register_fixture(
+            # Use a unique name to speed up lookup.
+            name=f"_unittest_setUpClass_fixture_{cls.__qualname__}",
+            func=unittest_setup_class_fixture,
+            nodeid=self.nodeid,
+            scope="class",
+            autouse=True,
+        )
+
+    def _register_unittest_setup_method_fixture(self, cls: type) -> None:
+        """Register an auto-use fixture to invoke setup_method and
+        teardown_method (#517)."""
+        setup = getattr(cls, "setup_method", None)
+        teardown = getattr(cls, "teardown_method", None)
+        if setup is None and teardown is None:
+            return None
+
+        def unittest_setup_method_fixture(
+            request: FixtureRequest,
+        ) -> Generator[None]:
+            self = request.instance
+            if _is_skipped(self):
+                reason = self.__unittest_skip_why__
+                raise pytest.skip.Exception(reason, _use_item_location=True)
+            if setup is not None:
+                setup(self, request.function)
+            yield
+            if teardown is not None:
+                teardown(self, request.function)
+
+        self.session._fixturemanager._register_fixture(
+            # Use a unique name to speed up lookup.
+            name=f"_unittest_setup_method_fixture_{cls.__qualname__}",
+            func=unittest_setup_method_fixture,
+            nodeid=self.nodeid,
+            scope="function",
+            autouse=True,
+        )
+
+
+class TestCaseFunction(Function):
+    nofuncargs = True
+    _excinfo: list[_pytest._code.ExceptionInfo[BaseException]] | None = None
+
+    def _getinstance(self):
+        assert isinstance(self.parent, UnitTestCase)
+        return self.parent.obj(self.name)
+
+    # Backward compat for pytest-django; can be removed after pytest-django
+    # updates + some slack.
+    @property
+    def _testcase(self):
+        return self.instance
+
+    def setup(self) -> None:
+        # A bound method to be called during teardown() if set (see 'runtest()').
+        self._explicit_tearDown: Callable[[], None] | None = None
+        super().setup()
+
+    def teardown(self) -> None:
+        if self._explicit_tearDown is not None:
+            self._explicit_tearDown()
+            self._explicit_tearDown = None
+        self._obj = None
+        del self._instance
+        super().teardown()
+
+    def startTest(self, testcase: unittest.TestCase) -> None:
+        pass
+
+    def _addexcinfo(self, rawexcinfo: _SysExcInfoType) -> None:
+        # Unwrap potential exception info (see twisted trial support below).
+        rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo)
+        try:
+            excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info(
+                rawexcinfo  # type: ignore[arg-type]
+            )
+            # Invoke the attributes to trigger storing the traceback
+            # trial causes some issue there.
+            _ = excinfo.value
+            _ = excinfo.traceback
+        except TypeError:
+            try:
+                try:
+                    values = traceback.format_exception(*rawexcinfo)
+                    values.insert(
+                        0,
+                        "NOTE: Incompatible Exception Representation, "
+                        "displaying natively:\n\n",
+                    )
+                    fail("".join(values), pytrace=False)
+                except (fail.Exception, KeyboardInterrupt):
+                    raise
+                except BaseException:
+                    fail(
+                        "ERROR: Unknown Incompatible Exception "
+                        f"representation:\n{rawexcinfo!r}",
+                        pytrace=False,
+                    )
+            except KeyboardInterrupt:
+                raise
+            except fail.Exception:
+                excinfo = _pytest._code.ExceptionInfo.from_current()
+        self.__dict__.setdefault("_excinfo", []).append(excinfo)
+
+    def addError(
+        self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType
+    ) -> None:
+        try:
+            if isinstance(rawexcinfo[1], exit.Exception):
+                exit(rawexcinfo[1].msg)
+        except TypeError:
+            pass
+        self._addexcinfo(rawexcinfo)
+
+    def addFailure(
+        self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType
+    ) -> None:
+        self._addexcinfo(rawexcinfo)
+
+    def addSkip(self, testcase: unittest.TestCase, reason: str) -> None:
+        try:
+            raise pytest.skip.Exception(reason, _use_item_location=True)
+        except skip.Exception:
+            self._addexcinfo(sys.exc_info())
+
+    def addExpectedFailure(
+        self,
+        testcase: unittest.TestCase,
+        rawexcinfo: _SysExcInfoType,
+        reason: str = "",
+    ) -> None:
+        try:
+            xfail(str(reason))
+        except xfail.Exception:
+            self._addexcinfo(sys.exc_info())
+
+    def addUnexpectedSuccess(
+        self,
+        testcase: unittest.TestCase,
+        reason: twisted.trial.unittest.Todo | None = None,
+    ) -> None:
+        msg = "Unexpected success"
+        if reason:
+            msg += f": {reason.reason}"
+        # Preserve unittest behaviour - fail the test. Explicitly not an XPASS.
+        try:
+            fail(msg, pytrace=False)
+        except fail.Exception:
+            self._addexcinfo(sys.exc_info())
+
+    def addSuccess(self, testcase: unittest.TestCase) -> None:
+        pass
+
+    def stopTest(self, testcase: unittest.TestCase) -> None:
+        pass
+
+    def addDuration(self, testcase: unittest.TestCase, elapsed: float) -> None:
+        pass
+
+    def runtest(self) -> None:
+        from _pytest.debugging import maybe_wrap_pytest_function_for_tracing
+
+        testcase = self.instance
+        assert testcase is not None
+
+        maybe_wrap_pytest_function_for_tracing(self)
+
+        # Let the unittest framework handle async functions.
+        if is_async_function(self.obj):
+            testcase(result=self)
+        else:
+            # When --pdb is given, we want to postpone calling tearDown() otherwise
+            # when entering the pdb prompt, tearDown() would have probably cleaned up
+            # instance variables, which makes it difficult to debug.
+            # Arguably we could always postpone tearDown(), but this changes the moment where the
+            # TestCase instance interacts with the results object, so better to only do it
+            # when absolutely needed.
+            # We need to consider if the test itself is skipped, or the whole class.
+            assert isinstance(self.parent, UnitTestCase)
+            skipped = _is_skipped(self.obj) or _is_skipped(self.parent.obj)
+            if self.config.getoption("usepdb") and not skipped:
+                self._explicit_tearDown = testcase.tearDown
+                setattr(testcase, "tearDown", lambda *args: None)
+
+            # We need to update the actual bound method with self.obj, because
+            # wrap_pytest_function_for_tracing replaces self.obj by a wrapper.
+            setattr(testcase, self.name, self.obj)
+            try:
+                testcase(result=self)
+            finally:
+                delattr(testcase, self.name)
+
+    def _traceback_filter(
+        self, excinfo: _pytest._code.ExceptionInfo[BaseException]
+    ) -> _pytest._code.Traceback:
+        traceback = super()._traceback_filter(excinfo)
+        ntraceback = traceback.filter(
+            lambda x: not x.frame.f_globals.get("__unittest"),
+        )
+        if not ntraceback:
+            ntraceback = traceback
+        return ntraceback
+
+
+@hookimpl(tryfirst=True)
+def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None:
+    if isinstance(item, TestCaseFunction):
+        if item._excinfo:
+            call.excinfo = item._excinfo.pop(0)
+            try:
+                del call.result
+            except AttributeError:
+                pass
+
+    # Convert unittest.SkipTest to pytest.skip.
+    # This is actually only needed for nose, which reuses unittest.SkipTest for
+    # its own nose.SkipTest. For unittest TestCases, SkipTest is already
+    # handled internally, and doesn't reach here.
+    unittest = sys.modules.get("unittest")
+    if unittest and call.excinfo and isinstance(call.excinfo.value, unittest.SkipTest):
+        excinfo = call.excinfo
+        call2 = CallInfo[None].from_call(
+            lambda: pytest.skip(str(excinfo.value)), call.when
+        )
+        call.excinfo = call2.excinfo
+
+
+# Twisted trial support.
+classImplements_has_run = False
+
+
+@hookimpl(wrapper=True)
+def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]:
+    if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules:
+        ut: Any = sys.modules["twisted.python.failure"]
+        global classImplements_has_run
+        Failure__init__ = ut.Failure.__init__
+        if not classImplements_has_run:
+            from twisted.trial.itrial import IReporter
+            from zope.interface import classImplements
+
+            classImplements(TestCaseFunction, IReporter)
+            classImplements_has_run = True
+
+        def excstore(
+            self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None
+        ):
+            if exc_value is None:
+                self._rawexcinfo = sys.exc_info()
+            else:
+                if exc_type is None:
+                    exc_type = type(exc_value)
+                self._rawexcinfo = (exc_type, exc_value, exc_tb)
+            try:
+                Failure__init__(
+                    self, exc_value, exc_type, exc_tb, captureVars=captureVars
+                )
+            except TypeError:
+                Failure__init__(self, exc_value, exc_type, exc_tb)
+
+        ut.Failure.__init__ = excstore
+        try:
+            res = yield
+        finally:
+            ut.Failure.__init__ = Failure__init__
+    else:
+        res = yield
+    return res
+
+
+def _is_skipped(obj) -> bool:
+    """Return True if the given object has been marked with @unittest.skip."""
+    return bool(getattr(obj, "__unittest_skip__", False))
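For context, this is the kind of std-library test class that `pytest_pycollect_makeitem` above turns into a `UnitTestCase` collector; `setUpClass` runs through the auto-registered class-scoped fixture and `@unittest.skip` flows through `_is_skipped`/`addSkip`. A minimal illustration, not part of the vendored file:

```python
import unittest


class TestMath(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Invoked once per class via the auto-use
        # _unittest_setUpClass_fixture_* fixture registered above.
        cls.base = 40

    def test_add(self):
        self.assertEqual(self.base + 2, 42)

    @unittest.skip("demonstrates __unittest_skip__ handling")
    def test_skipped(self):
        pass
```

Running `pytest` on such a file collects `TestMath` as a `UnitTestCase` and reports the decorated method through `TestCaseFunction.addSkip()`.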
.venv/lib/python3.11/site-packages/cpuinfo/__init__.py ADDED
@@ -0,0 +1,5 @@
+
+import sys
+from cpuinfo.cpuinfo import *
+
+
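The star re-export above makes `get_cpu_info()` the package's public entry point. A quick sketch of typical use, assuming the standard py-cpuinfo API (the dict keys shown vary by platform):

```python
from cpuinfo import get_cpu_info

info = get_cpu_info()          # plain dict of CPU facts
print(info.get("brand_raw"))   # e.g. vendor/model string
print(info.get("arch"))        # e.g. "X86_64"
```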
.venv/lib/python3.11/site-packages/cpuinfo/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (242 Bytes).
 
.venv/lib/python3.11/site-packages/cpuinfo/__pycache__/__main__.cpython-311.pyc ADDED
Binary file (251 Bytes).
 
.venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+pip
.venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/LICENSE ADDED
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2023- The Outlines developers
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
.venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/METADATA ADDED
@@ -0,0 +1,124 @@
+Metadata-Version: 2.1
+Name: outlines_core
+Version: 0.1.26
+Summary: Structured Text Generation in Rust
+Author: Outlines Developers
+License: Apache-2.0
+Project-URL: homepage, https://github.com/dottxt-ai/outlines-core
+Project-URL: documentation, https://dottxt-ai.github.io/outlines-core/
+Project-URL: repository, https://github.com/dottxt-ai/outlines-core
+Keywords: machine learning,deep learning,language models,structured generation
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: Science/Research
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: interegular
+Requires-Dist: jsonschema
+Provides-Extra: test
+Requires-Dist: pre-commit; extra == "test"
+Requires-Dist: pydantic; extra == "test"
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: pytest-benchmark; extra == "test"
+Requires-Dist: pytest-cov; extra == "test"
+Requires-Dist: pytest-mock; extra == "test"
+Requires-Dist: coverage[toml]>=5.1; extra == "test"
+Requires-Dist: diff-cover; extra == "test"
+Requires-Dist: accelerate; extra == "test"
+Requires-Dist: beartype<0.16.0; extra == "test"
+Requires-Dist: huggingface_hub; extra == "test"
+Requires-Dist: torch; extra == "test"
+Requires-Dist: numpy; extra == "test"
+Requires-Dist: scipy; extra == "test"
+Requires-Dist: transformers; extra == "test"
+Requires-Dist: datasets; extra == "test"
+Requires-Dist: pillow; extra == "test"
+Requires-Dist: asv; extra == "test"
+Requires-Dist: psutil; extra == "test"
+Requires-Dist: setuptools-rust; extra == "test"
+
+<div align="center" style="margin-bottom: 1em;">
+
+<img src="./docs/assets/images/logo.png" alt="Outlines-core Logo" width=500></img>
+
+[![Contributors][contributors-badge]][contributors]
+
+*Structured generation (in Rust).*
+</div>
+
+This package provides the core functionality for structured generation, formerly implemented in [Outlines][outlines], with a focus on performance and portability.
+
+# Install
+
+We provide bindings to the following languages:
+- [Rust][rust-implementation] (Original implementation)
+- [Python][python-bindings]
+
+The latest release of the Python bindings is available on PyPI using `pip`:
+
+``` shell
+pip install outlines-core
+```
+
+The current development branch of `outlines-core` can be installed from GitHub, also using `pip`:
+
+``` shell
+pip install git+https://github.com/outlines-dev/outlines-core
+```
+
+Or install in a Rust project with cargo:
+``` bash
+cargo add outlines-core
+```
+
+# How to contribute?
+
+## Setup
+
+First, fork the repository on GitHub and clone the fork locally:
+
+```bash
+git clone git@github.com:YourUserName/outlines-core.git
+cd outlines-core
+```
+
+Create a new virtual environment:
+
+``` bash
+python -m venv .venv
+source .venv/bin/activate
+```
+
+Then install the dependencies in editable mode, and install the pre-commit hooks:
+
+``` bash
+pip install -e ".[test]"
+pre-commit install
+```
+
+## Before pushing your code
+
+Run the tests:
+
+``` bash
+pytest
+```
+
+And run the code style checks:
+
+``` bash
+pre-commit run --all-files
+```
+
+
+[outlines]: https://github.com/dottxt-ai/outlines
+[contributors]: https://github.com/outlines-dev/outlines-core/graphs/contributors
+[contributors-badge]: https://img.shields.io/github/contributors/outlines-dev/outlines-core?style=flat-square&logo=github&logoColor=white&color=ECEFF4
+[rust-implementation]: https://github.com/outlines-dev/outlines-core/tree/readme/src
+[python-bindings]: https://github.com/outlines-dev/outlines-core/tree/readme/python/outlines_core
.venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/RECORD ADDED
@@ -0,0 +1,21 @@
+outlines_core-0.1.26.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+outlines_core-0.1.26.dist-info/LICENSE,sha256=9xB47oqqPVZwSIdW8Zk7neOuZMlUagIy67vdWVxTddc,11354
+outlines_core-0.1.26.dist-info/METADATA,sha256=YNmRZrAWCy_JWcTTuWsTzD9qZu-Wdm5J8JTTolLRigM,3761
+outlines_core-0.1.26.dist-info/RECORD,,
+outlines_core-0.1.26.dist-info/WHEEL,sha256=9BFfIe-Zq441iQ0ehutX65O5faGDpmB1Uw3WaQGk4f0,151
+outlines_core-0.1.26.dist-info/top_level.txt,sha256=45vDiTQKP-oMzuyEv9_QNERQrpBud0CXZ1BDiFJjyV4,14
+outlines_core/__init__.py,sha256=U-Sz4BgH-fmII5vv_AL_qSp2a2mLd6g6aHbEfnUPmKU,208
+outlines_core/__pycache__/__init__.cpython-311.pyc,,
+outlines_core/__pycache__/_version.cpython-311.pyc,,
+outlines_core/_version.py,sha256=pHptU6h1OxA8-tsynXa5Rz3N6XELevZ_27Ye-N1R-ds,413
+outlines_core/fsm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+outlines_core/fsm/__pycache__/__init__.cpython-311.pyc,,
+outlines_core/fsm/__pycache__/guide.cpython-311.pyc,,
+outlines_core/fsm/__pycache__/json_schema.cpython-311.pyc,,
+outlines_core/fsm/__pycache__/regex.cpython-311.pyc,,
+outlines_core/fsm/guide.py,sha256=PujEqMU_UgYHBfOmWsAvVbdEb1HeniffGHQj61ZtCBE,9584
+outlines_core/fsm/json_schema.py,sha256=nXgIMF6uVI7sYqy9b1kJOChFfNutvbmvwmOgFbHcnl0,230
+outlines_core/fsm/outlines_core_rs.cpython-311-x86_64-linux-gnu.so,sha256=O3cV_9q1Ah16BFzf7g2xbNTxx7H8P6gbW8P9yt8QQ98,686792
+outlines_core/fsm/outlines_core_rs.pyi,sha256=1pvCFg3KDIDySgwhyzC4-r7tljU-DoWVr-AWs5KuhM0,2917
+outlines_core/fsm/regex.py,sha256=jw4pOwLhXXVlErnN6QUN56F6_nmPp8ATIDN-e3cl1y8,17243
+outlines_core/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
.venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: setuptools (75.6.0)
+Root-Is-Purelib: false
+Tag: cp311-cp311-manylinux_2_17_x86_64
+Tag: cp311-cp311-manylinux2014_x86_64
+
.venv/lib/python3.11/site-packages/outlines_core-0.1.26.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+outlines_core
.venv/lib/python3.11/site-packages/ray/cloudpickle/__init__.py ADDED
@@ -0,0 +1,47 @@
+from __future__ import absolute_import
+
+import os
+from pickle import PicklingError
+
+from ray.cloudpickle.cloudpickle import *  # noqa
+from ray.cloudpickle.cloudpickle_fast import CloudPickler, dumps, dump  # noqa
+
+
+# Conform to the convention used by python serialization libraries, which
+# expose their Pickler subclass at top-level under the "Pickler" name.
+Pickler = CloudPickler
+
+__version__ = '2.0.0'
+
+
+def _warn_msg(obj, method, exc):
+    return (
+        f"{method}({str(obj)}) failed."
+        "\nTo check which non-serializable variables are captured "
+        "in scope, re-run the ray script with 'RAY_PICKLE_VERBOSE_DEBUG=1'.")
+
+
+def dump_debug(obj, *args, **kwargs):
+    try:
+        return dump(obj, *args, **kwargs)
+    except (TypeError, PicklingError) as exc:
+        if os.environ.get("RAY_PICKLE_VERBOSE_DEBUG"):
+            from ray.util.check_serialize import inspect_serializability
+            inspect_serializability(obj)
+            raise
+        else:
+            msg = _warn_msg(obj, "ray.cloudpickle.dump", exc)
+            raise type(exc)(msg)
+
+
+def dumps_debug(obj, *args, **kwargs):
+    try:
+        return dumps(obj, *args, **kwargs)
+    except (TypeError, PicklingError) as exc:
+        if os.environ.get("RAY_PICKLE_VERBOSE_DEBUG"):
+            from ray.util.check_serialize import inspect_serializability
+            inspect_serializability(obj)
+            raise
+        else:
+            msg = _warn_msg(obj, "ray.cloudpickle.dumps", exc)
+            raise type(exc)(msg)
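The `*_debug` wrappers only change error reporting: on a pickling failure they either re-raise with a hint appended or, when `RAY_PICKLE_VERBOSE_DEBUG=1` is set, run `inspect_serializability` over the object graph first. A hedged sketch of the intended call pattern (the closure below is a deliberately non-picklable example):

```python
import threading

from ray import cloudpickle

lock = threading.Lock()  # thread locks are a classic non-picklable capture


def task():
    return lock.locked()


# This raises TypeError with the RAY_PICKLE_VERBOSE_DEBUG hint appended;
# setting RAY_PICKLE_VERBOSE_DEBUG=1 prints the serializability trace instead.
cloudpickle.dumps_debug(task)
```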
.venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (2.32 kB).
 
.venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/cloudpickle.cpython-311.pyc ADDED
Binary file (56.8 kB).
 
.venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/cloudpickle_fast.cpython-311.pyc ADDED
Binary file (687 Bytes).
 
.venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/compat.cpython-311.pyc ADDED
Binary file (1.02 kB).
 
.venv/lib/python3.11/site-packages/ray/cloudpickle/__pycache__/py_pickle.cpython-311.pyc ADDED
Binary file (1.35 kB).
 
.venv/lib/python3.11/site-packages/ray/cloudpickle/cloudpickle.py ADDED
@@ -0,0 +1,1487 @@
+"""Pickler class to extend the standard pickle.Pickler functionality
+
+The main objective is to make it natural to perform distributed computing on
+clusters (such as PySpark, Dask, Ray...) with interactively defined code
+(functions, classes, ...) written in notebooks or console.
+
+In particular this pickler adds the following features:
+- serialize interactively-defined or locally-defined functions, classes,
+  enums, typevars, lambdas and nested functions to compiled byte code;
+- deal with some other non-serializable objects in an ad-hoc manner where
+  applicable.
+
+This pickler is therefore meant to be used for the communication between short
+lived Python processes running the same version of Python and libraries. In
+particular, it is not meant to be used for long term storage of Python objects.
+
+It does not include an unpickler, as standard Python unpickling suffices.
+
+This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
+<https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
+
+Copyright (c) 2012-now, CloudPickle developers and contributors.
+Copyright (c) 2012, Regents of the University of California.
+Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the University of California, Berkeley nor the
+      names of its contributors may be used to endorse or promote
+      products derived from this software without specific prior written
+      permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import _collections_abc
+from collections import ChainMap, OrderedDict
+import abc
+import builtins
+import copyreg
+import dataclasses
+import dis
+from enum import Enum
+import io
+import itertools
+import logging
+import opcode
+import pickle
+from pickle import _getattribute
+import platform
+import struct
+import sys
+import threading
+import types
+import typing
+import uuid
+import warnings
+import weakref
+
+# The following import is required to be imported in the cloudpickle
+# namespace to be able to load pickle files generated with older versions of
+# cloudpickle. See: tests/test_backward_compat.py
+from types import CellType  # noqa: F401
+
+
+# cloudpickle is meant for inter process communication: we expect all
+# communicating processes to run the same Python version hence we favor
+# communication speed over compatibility:
+DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
+
+# Names of modules whose resources should be treated as dynamic.
+_PICKLE_BY_VALUE_MODULES = set()
+
+# Track the provenance of reconstructed dynamic classes to make it possible to
+# reconstruct instances from the matching singleton class definition when
+# appropriate and preserve the usual "isinstance" semantics of Python objects.
+_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
+_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
+_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()
+
+PYPY = platform.python_implementation() == "PyPy"
+
+builtin_code_type = None
+if PYPY:
+    # builtin-code objects only exist in pypy
+    builtin_code_type = type(float.__new__.__code__)
+
+_extract_code_globals_cache = weakref.WeakKeyDictionary()
+
+
+def _get_or_create_tracker_id(class_def):
+    with _DYNAMIC_CLASS_TRACKER_LOCK:
+        class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def)
+        if class_tracker_id is None:
+            class_tracker_id = uuid.uuid4().hex
+            _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
+            _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def
+    return class_tracker_id
+
+
+def _lookup_class_or_track(class_tracker_id, class_def):
+    if class_tracker_id is not None:
+        with _DYNAMIC_CLASS_TRACKER_LOCK:
+            class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
+                class_tracker_id, class_def
+            )
+            _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
+    return class_def
+
+
+def register_pickle_by_value(module):
+    """Register a module to make its functions and classes picklable by value.
+
+    By default, functions and classes that are attributes of an importable
+    module are to be pickled by reference, that is relying on re-importing
+    the attribute from the module at load time.
+
+    If `register_pickle_by_value(module)` is called, all its functions and
+    classes are subsequently to be pickled by value, meaning that they can
+    be loaded in Python processes where the module is not importable.
+
+    This is especially useful when developing a module in a distributed
+    execution environment: restarting the client Python process with the new
+    source code is enough: there is no need to re-install the new version
+    of the module on all the worker nodes nor to restart the workers.
+
+    Note: this feature is considered experimental. See the cloudpickle
+    README.md file for more details and limitations.
+    """
+    if not isinstance(module, types.ModuleType):
+        raise ValueError(f"Input should be a module object, got {str(module)} instead")
+    # In the future, cloudpickle may need a way to access any module registered
+    # for pickling by value in order to introspect relative imports inside
+    # functions pickled by value. (see
+    # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633).
+    # This access can be ensured by checking that module is present in
+    # sys.modules at registering time and assuming that it will still be in
+    # there when accessed during pickling. Another alternative would be to
+    # store a weakref to the module. Even though cloudpickle does not implement
+    # this introspection yet, in order to avoid a possible breaking change
+    # later, we still enforce the presence of module inside sys.modules.
+    if module.__name__ not in sys.modules:
+        raise ValueError(
+            f"{module} was not imported correctly, have you used an "
+            "`import` statement to access it?"
+        )
+    _PICKLE_BY_VALUE_MODULES.add(module.__name__)
+
166
+
167
+ def unregister_pickle_by_value(module):
168
+ """Unregister that the input module should be pickled by value."""
169
+ if not isinstance(module, types.ModuleType):
170
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
171
+ if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
172
+ raise ValueError(f"{module} is not registered for pickle by value")
173
+ else:
174
+ _PICKLE_BY_VALUE_MODULES.remove(module.__name__)
175
+
176
+
177
+ def list_registry_pickle_by_value():
178
+ return _PICKLE_BY_VALUE_MODULES.copy()
179
+
180
+
181
+ def _is_registered_pickle_by_value(module):
182
+ module_name = module.__name__
183
+ if module_name in _PICKLE_BY_VALUE_MODULES:
184
+ return True
185
+ while True:
186
+ parent_name = module_name.rsplit(".", 1)[0]
187
+ if parent_name == module_name:
188
+ break
189
+ if parent_name in _PICKLE_BY_VALUE_MODULES:
190
+ return True
191
+ module_name = parent_name
192
+ return False
193
+
194
+
195
+ def _whichmodule(obj, name):
196
+ """Find the module an object belongs to.
197
+
198
+ This function differs from ``pickle.whichmodule`` in two ways:
199
+ - it does not mangle the cases where obj's module is __main__ and obj was
200
+ not found in any module.
201
+ - Errors arising during module introspection are ignored, as those errors
202
+ are considered unwanted side effects.
203
+ """
204
+ module_name = getattr(obj, "__module__", None)
205
+
206
+ if module_name is not None:
207
+ return module_name
208
+ # Protect the iteration by using a copy of sys.modules against dynamic
209
+ # modules that trigger imports of other modules upon calls to getattr or
210
+ # other threads importing at the same time.
211
+ for module_name, module in sys.modules.copy().items():
212
+ # Some modules such as coverage can inject non-module objects inside
213
+ # sys.modules
214
+ if (
215
+ module_name == "__main__"
216
+ or module is None
217
+ or not isinstance(module, types.ModuleType)
218
+ ):
219
+ continue
220
+ try:
221
+ if _getattribute(module, name)[0] is obj:
222
+ return module_name
223
+ except Exception:
224
+ pass
225
+ return None
226
+
227
+
228
+ def _should_pickle_by_reference(obj, name=None):
229
+ """Test whether an function or a class should be pickled by reference
230
+
231
+ Pickling by reference means by that the object (typically a function or a
232
+ class) is an attribute of a module that is assumed to be importable in the
233
+ target Python environment. Loading will therefore rely on importing the
234
+ module and then calling `getattr` on it to access the function or class.
235
+
236
+ Pickling by reference is the only option to pickle functions and classes
237
+ in the standard library. In cloudpickle the alternative option is to
238
+ pickle by value (for instance for interactively or locally defined
239
+ functions and classes or for attributes of modules that have been
240
+ explicitly registered to be pickled by value.
241
+ """
242
+ if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
243
+ module_and_name = _lookup_module_and_qualname(obj, name=name)
244
+ if module_and_name is None:
245
+ return False
246
+ module, name = module_and_name
247
+ return not _is_registered_pickle_by_value(module)
248
+
249
+ elif isinstance(obj, types.ModuleType):
250
+ # We assume that sys.modules is primarily used as a cache mechanism for
251
+ # the Python import machinery. Checking if a module has been added in
252
+ # is sys.modules therefore a cheap and simple heuristic to tell us
253
+ # whether we can assume that a given module could be imported by name
254
+ # in another Python process.
255
+ if _is_registered_pickle_by_value(obj):
256
+ return False
257
+ return obj.__name__ in sys.modules
258
+ else:
259
+ raise TypeError(
260
+ "cannot check importability of {} instances".format(type(obj).__name__)
261
+ )
262
+
263
+
264
+ def _lookup_module_and_qualname(obj, name=None):
265
+ if name is None:
266
+ name = getattr(obj, "__qualname__", None)
267
+ if name is None: # pragma: no cover
268
+ # This used to be needed for Python 2.7 support but is probably not
269
+ # needed anymore. However we keep the __name__ introspection in case
270
+ # users of cloudpickle rely on this old behavior for unknown reasons.
271
+ name = getattr(obj, "__name__", None)
272
+
273
+ module_name = _whichmodule(obj, name)
274
+
275
+ if module_name is None:
276
+ # In this case, obj.__module__ is None AND obj was not found in any
277
+ # imported module. obj is thus treated as dynamic.
278
+ return None
279
+
280
+ if module_name == "__main__":
281
+ return None
282
+
283
+ # Note: if module_name is in sys.modules, the corresponding module is
284
+ # assumed importable at unpickling time. See #357
285
+ module = sys.modules.get(module_name, None)
286
+ if module is None:
287
+ # The main reason why obj's module would not be imported is that this
288
+ # module has been dynamically created, using for example
289
+ # types.ModuleType. The other possibility is that module was removed
290
+ # from sys.modules after obj was created/imported. But this case is not
291
+ # supported, as the standard pickle does not support it either.
292
+ return None
293
+
294
+ try:
295
+ obj2, parent = _getattribute(module, name)
296
+ except AttributeError:
297
+ # obj was not found inside the module it points to
298
+ return None
299
+ if obj2 is not obj:
300
+ return None
301
+ return module, name
302
+
303
+
304
+ def _extract_code_globals(co):
305
+ """Find all globals names read or written to by codeblock co."""
306
+ out_names = _extract_code_globals_cache.get(co)
307
+ if out_names is None:
308
+ # We use a dict with None values instead of a set to get a
309
+ # deterministic order and avoid introducing non-deterministic pickle
310
+ # bytes as a results.
311
+ out_names = {name: None for name in _walk_global_ops(co)}
312
+
313
+ # Declaring a function inside another one using the "def ..." syntax
314
+ # generates a constant code object corresponding to the one of the
315
+ # nested function. As the nested function may itself need global
316
+ # variables, we need to introspect its code, extract its globals (looking
317
+ # for code objects in its co_consts attribute) and add the result to
318
+ # code_globals
319
+ if co.co_consts:
320
+ for const in co.co_consts:
321
+ if isinstance(const, types.CodeType):
322
+ out_names.update(_extract_code_globals(const))
323
+
324
+ _extract_code_globals_cache[co] = out_names
325
+
326
+ return out_names
327
+
328
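+ # Illustrative extraction (a sketch; MAX and LIMIT are hypothetical global
+ # names, never actually defined or executed here):
+ #
+ #   def outer():
+ #       print(MAX)
+ #       def inner():
+ #           return LIMIT
+ #   assert set(_extract_code_globals(outer.__code__)) == {"print", "MAX", "LIMIT"}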
+
329
+ def _find_imported_submodules(code, top_level_dependencies):
330
+ """Find currently imported submodules used by a function.
331
+
332
+ Submodules used by a function need to be detected and referenced for the
333
+ function to work correctly at depickling time. Because submodules can be
334
+ referenced as an attribute of their parent package (``package.submodule``), we
335
+ need a special introspection technique that does not rely on GLOBAL-related
336
+ opcodes to find references of them in a code object.
337
+
338
+ Example:
339
+ ```
340
+ import concurrent.futures
341
+ import cloudpickle
342
+ def func():
343
+ x = concurrent.futures.ThreadPoolExecutor
344
+ if __name__ == '__main__':
345
+ cloudpickle.dumps(func)
346
+ ```
347
+ The globals extracted by cloudpickle in the function's state include the
348
+ concurrent package, but not its submodule (here, concurrent.futures), which
349
+ is the module used by func. _find_imported_submodules will detect the usage
350
+ of concurrent.futures. Saving this module alongside func will ensure
351
+ that calling func once depickled does not fail due to concurrent.futures
352
+ not being imported.
353
+ """
354
+
355
+ subimports = []
356
+ # check if any known dependency is an imported package
357
+ for x in top_level_dependencies:
358
+ if (
359
+ isinstance(x, types.ModuleType)
360
+ and hasattr(x, "__package__")
361
+ and x.__package__
362
+ ):
363
+ # check if the package has any currently loaded sub-imports
364
+ prefix = x.__name__ + "."
365
+ # A concurrent thread could mutate sys.modules,
366
+ # make sure we iterate over a copy to avoid exceptions
367
+ for name in list(sys.modules):
368
+ # Older versions of pytest will add a "None" module to
369
+ # sys.modules.
370
+ if name is not None and name.startswith(prefix):
371
+ # check whether the function can address the sub-module
372
+ tokens = set(name[len(prefix) :].split("."))
373
+ if not tokens - set(code.co_names):
374
+ subimports.append(sys.modules[name])
375
+ return subimports
376
+
377
+
378
+ # relevant opcodes
379
+ STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"]
380
+ DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"]
381
+ LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"]
382
+ GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
383
+ HAVE_ARGUMENT = dis.HAVE_ARGUMENT
384
+ EXTENDED_ARG = dis.EXTENDED_ARG
385
+
386
+
387
+ _BUILTIN_TYPE_NAMES = {}
388
+ for k, v in types.__dict__.items():
389
+ if type(v) is type:
390
+ _BUILTIN_TYPE_NAMES[v] = k
391
+
392
+
393
+ def _builtin_type(name):
394
+ if name == "ClassType": # pragma: no cover
395
+ # Backward compat to load pickle files generated with cloudpickle
396
+ # < 1.3 even if loading pickle files from older versions is not
397
+ # officially supported.
398
+ return type
399
+ return getattr(types, name)
400
+
401
+
402
+ def _walk_global_ops(code):
403
+ """Yield referenced name for global-referencing instructions in code."""
404
+ for instr in dis.get_instructions(code):
405
+ op = instr.opcode
406
+ if op in GLOBAL_OPS:
407
+ yield instr.argval
408
+
409
+
410
+ def _extract_class_dict(cls):
411
+ """Retrieve a copy of the dict of a class without the inherited method."""
412
+ clsdict = dict(cls.__dict__) # copy dict proxy to a dict
413
+ if len(cls.__bases__) == 1:
414
+ inherited_dict = cls.__bases__[0].__dict__
415
+ else:
416
+ inherited_dict = {}
417
+ for base in reversed(cls.__bases__):
418
+ inherited_dict.update(base.__dict__)
419
+ to_remove = []
420
+ for name, value in clsdict.items():
421
+ try:
422
+ base_value = inherited_dict[name]
423
+ if value is base_value:
424
+ to_remove.append(name)
425
+ except KeyError:
426
+ pass
427
+ for name in to_remove:
428
+ clsdict.pop(name)
429
+ return clsdict
430
+
431
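+ # Illustrative filtering (a sketch with hypothetical classes A and B): only
+ # attributes that B itself (re)defines survive; entries whose value is
+ # identical to the inherited one are dropped.
+ #
+ #   class A:
+ #       def f(self): ...
+ #   class B(A):
+ #       def g(self): ...
+ #   clsdict = _extract_class_dict(B)
+ #   assert "g" in clsdict and "f" not in clsdict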
+
432
+ def is_tornado_coroutine(func):
433
+ """Return whether `func` is a Tornado coroutine function.
434
+
435
+ Running coroutines are not supported.
436
+ """
437
+ warnings.warn(
438
+ "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
439
+ "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function "
440
+ "directly instead.",
441
+ category=DeprecationWarning,
442
+ )
443
+ if "tornado.gen" not in sys.modules:
444
+ return False
445
+ gen = sys.modules["tornado.gen"]
446
+ if not hasattr(gen, "is_coroutine_function"):
447
+ # Tornado version is too old
448
+ return False
449
+ return gen.is_coroutine_function(func)
450
+
451
+
452
+ def subimport(name):
453
+ # We cannot simply do `return __import__(name)`: indeed, if ``name`` is
454
+ # the name of a submodule, __import__ will return the top-level root module
455
+ # of this submodule. For instance, __import__('os.path') returns the `os`
456
+ # module.
457
+ __import__(name)
458
+ return sys.modules[name]
459
+
460
+
461
+ def dynamic_subimport(name, vars):
462
+ mod = types.ModuleType(name)
463
+ mod.__dict__.update(vars)
464
+ mod.__dict__["__builtins__"] = builtins.__dict__
465
+ return mod
466
+
467
+
468
+ def _get_cell_contents(cell):
469
+ try:
470
+ return cell.cell_contents
471
+ except ValueError:
472
+ # Handle empty cells explicitly with a sentinel value.
473
+ return _empty_cell_value
474
+
475
+
476
+ def instance(cls):
477
+ """Create a new instance of a class.
478
+
479
+ Parameters
480
+ ----------
481
+ cls : type
482
+ The class to create an instance of.
483
+
484
+ Returns
485
+ -------
486
+ instance : cls
487
+ A new instance of ``cls``.
488
+ """
489
+ return cls()
490
+
491
+
492
+ @instance
493
+ class _empty_cell_value:
494
+ """Sentinel for empty closures."""
495
+
496
+ @classmethod
497
+ def __reduce__(cls):
498
+ return cls.__name__
499
+
500
+
501
+ def _make_function(code, globals, name, argdefs, closure):
502
+ # Setting __builtins__ in globals is needed for nogil CPython.
503
+ globals["__builtins__"] = __builtins__
504
+ return types.FunctionType(code, globals, name, argdefs, closure)
505
+
506
+
507
+ def _make_empty_cell():
508
+ if False:
509
+ # trick the compiler into creating an empty cell in our lambda
510
+ cell = None
511
+ raise AssertionError("this route should not be executed")
512
+
513
+ return (lambda: cell).__closure__[0]
514
+
515
+
516
+ def _make_cell(value=_empty_cell_value):
517
+ cell = _make_empty_cell()
518
+ if value is not _empty_cell_value:
519
+ cell.cell_contents = value
520
+ return cell
521
+
522
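+ # Illustrative round-trip (a sketch): reconstructed cells behave like real
+ # closure cells.
+ #
+ #   c = _make_cell(42)
+ #   assert c.cell_contents == 42
+ #   e = _make_empty_cell()
+ #   # accessing e.cell_contents raises ValueError until a value is assigned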
+
523
+ def _make_skeleton_class(
524
+ type_constructor, name, bases, type_kwargs, class_tracker_id, extra
525
+ ):
526
+ """Build dynamic class with an empty __dict__ to be filled once memoized
527
+
528
+ If class_tracker_id is not None, try to look up an existing class definition
529
+ matching that id. If none is found, track a newly reconstructed class
530
+ definition under that id so that other instances stemming from the same
531
+ class id will also reuse this class definition.
532
+
533
+ The "extra" variable is meant to be a dict (or None) that can be used for
534
+ forward compatibility should the need arise.
535
+ """
536
+ skeleton_class = types.new_class(
537
+ name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
538
+ )
539
+ return _lookup_class_or_track(class_tracker_id, skeleton_class)
540
+
541
+
542
+ def _make_skeleton_enum(
543
+ bases, name, qualname, members, module, class_tracker_id, extra
544
+ ):
545
+ """Build dynamic enum with an empty __dict__ to be filled once memoized
546
+
547
+ The creation of the enum class is inspired by the code of
548
+ EnumMeta._create_.
549
+
550
+ If class_tracker_id is not None, try to look up an existing enum definition
551
+ matching that id. If none is found, track a newly reconstructed enum
552
+ definition under that id so that other instances stemming from the same
553
+ class id will also reuse this enum definition.
554
+
555
+ The "extra" variable is meant to be a dict (or None) that can be used for
556
+ forward compatibility should the need arise.
557
+ """
558
+ # enums always inherit from their base Enum class at the last position in
559
+ # the list of base classes:
560
+ enum_base = bases[-1]
561
+ metacls = enum_base.__class__
562
+ classdict = metacls.__prepare__(name, bases)
563
+
564
+ for member_name, member_value in members.items():
565
+ classdict[member_name] = member_value
566
+ enum_class = metacls.__new__(metacls, name, bases, classdict)
567
+ enum_class.__module__ = module
568
+ enum_class.__qualname__ = qualname
569
+
570
+ return _lookup_class_or_track(class_tracker_id, enum_class)
571
+
572
+
573
+ def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
574
+ tv = typing.TypeVar(
575
+ name,
576
+ *constraints,
577
+ bound=bound,
578
+ covariant=covariant,
579
+ contravariant=contravariant,
580
+ )
581
+ return _lookup_class_or_track(class_tracker_id, tv)
582
+
583
+
584
+ def _decompose_typevar(obj):
585
+ return (
586
+ obj.__name__,
587
+ obj.__bound__,
588
+ obj.__constraints__,
589
+ obj.__covariant__,
590
+ obj.__contravariant__,
591
+ _get_or_create_tracker_id(obj),
592
+ )
593
+
594
+
595
+ def _typevar_reduce(obj):
596
+ # TypeVar instances require the module information hence why we
597
+ # are not using the _should_pickle_by_reference directly
598
+ module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
599
+
600
+ if module_and_name is None:
601
+ return (_make_typevar, _decompose_typevar(obj))
602
+ elif _is_registered_pickle_by_value(module_and_name[0]):
603
+ return (_make_typevar, _decompose_typevar(obj))
604
+
605
+ return (getattr, module_and_name)
606
+
607
+
608
+ def _get_bases(typ):
609
+ if "__orig_bases__" in getattr(typ, "__dict__", {}):
610
+ # For generic types (see PEP 560)
611
+ # Note that simply checking `hasattr(typ, '__orig_bases__')` is not
612
+ # correct. Subclasses of a fully-parameterized generic class does not
613
+ # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
614
+ # will return True because it's defined in the base class.
615
+ bases_attr = "__orig_bases__"
616
+ else:
617
+ # For regular class objects
618
+ bases_attr = "__bases__"
619
+ return getattr(typ, bases_attr)
620
+
621
+
622
+ def _make_dict_keys(obj, is_ordered=False):
623
+ if is_ordered:
624
+ return OrderedDict.fromkeys(obj).keys()
625
+ else:
626
+ return dict.fromkeys(obj).keys()
627
+
628
+
629
+ def _make_dict_values(obj, is_ordered=False):
630
+ if is_ordered:
631
+ return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
632
+ else:
633
+ return {i: _ for i, _ in enumerate(obj)}.values()
634
+
635
+
636
+ def _make_dict_items(obj, is_ordered=False):
637
+ if is_ordered:
638
+ return OrderedDict(obj).items()
639
+ else:
640
+ return obj.items()
641
+
642
+
643
+ # COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
644
+ # -------------------------------------------------
645
+
646
+
647
+ def _class_getnewargs(obj):
648
+ type_kwargs = {}
649
+ if "__module__" in obj.__dict__:
650
+ type_kwargs["__module__"] = obj.__module__
651
+
652
+ __dict__ = obj.__dict__.get("__dict__", None)
653
+ if isinstance(__dict__, property):
654
+ type_kwargs["__dict__"] = __dict__
655
+
656
+ return (
657
+ type(obj),
658
+ obj.__name__,
659
+ _get_bases(obj),
660
+ type_kwargs,
661
+ _get_or_create_tracker_id(obj),
662
+ None,
663
+ )
664
+
665
+
666
+ def _enum_getnewargs(obj):
667
+ members = {e.name: e.value for e in obj}
668
+ return (
669
+ obj.__bases__,
670
+ obj.__name__,
671
+ obj.__qualname__,
672
+ members,
673
+ obj.__module__,
674
+ _get_or_create_tracker_id(obj),
675
+ None,
676
+ )
677
+
678
+
679
+ # COLLECTION OF OBJECTS RECONSTRUCTORS
680
+ # ------------------------------------
681
+ def _file_reconstructor(retval):
682
+ return retval
683
+
684
+
685
+ # COLLECTION OF OBJECTS STATE GETTERS
686
+ # -----------------------------------
687
+
688
+
689
+ def _function_getstate(func):
690
+ # - Put func's dynamic attributes (stored in func.__dict__) in state. These
691
+ # attributes will be restored at unpickling time using
692
+ # f.__dict__.update(state)
693
+ # - Put func's members into slotstate. Such attributes will be restored at
694
+ # unpickling time by iterating over slotstate and calling setattr(func,
695
+ # slotname, slotvalue)
696
+ slotstate = {
697
+ "__name__": func.__name__,
698
+ "__qualname__": func.__qualname__,
699
+ "__annotations__": func.__annotations__,
700
+ "__kwdefaults__": func.__kwdefaults__,
701
+ "__defaults__": func.__defaults__,
702
+ "__module__": func.__module__,
703
+ "__doc__": func.__doc__,
704
+ "__closure__": func.__closure__,
705
+ }
706
+
707
+ f_globals_ref = _extract_code_globals(func.__code__)
708
+ f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}
709
+
710
+ if func.__closure__ is not None:
711
+ closure_values = list(map(_get_cell_contents, func.__closure__))
712
+ else:
713
+ closure_values = ()
714
+
715
+ # Extract currently-imported submodules used by func. Storing these modules
716
+ # in a _cloudpickle_submodules attribute of the object's state will
717
+ # trigger the side effect of importing these modules at unpickling time
718
+ # (which is necessary for func to work correctly once depickled)
719
+ slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
720
+ func.__code__, itertools.chain(f_globals.values(), closure_values)
721
+ )
722
+ slotstate["__globals__"] = f_globals
723
+
724
+ state = func.__dict__
725
+ return state, slotstate
726
+
727
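+ # Illustrative shape of the result (a sketch; GREETING and f are hypothetical):
+ #
+ #   GREETING = "hi"
+ #   def f():
+ #       return GREETING
+ #   state, slotstate = _function_getstate(f)
+ #   assert slotstate["__globals__"] == {"GREETING": "hi"}
+ #   assert slotstate["__name__"] == "f"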
+
728
+ def _class_getstate(obj):
729
+ clsdict = _extract_class_dict(obj)
730
+ clsdict.pop("__weakref__", None)
731
+
732
+ if issubclass(type(obj), abc.ABCMeta):
733
+ # If obj is an instance of an ABCMeta subclass, don't pickle the
734
+ # cache/negative caches populated during isinstance/issubclass
735
+ # checks, but pickle the list of registered subclasses of obj.
736
+ clsdict.pop("_abc_cache", None)
737
+ clsdict.pop("_abc_negative_cache", None)
738
+ clsdict.pop("_abc_negative_cache_version", None)
739
+ registry = clsdict.pop("_abc_registry", None)
740
+ if registry is None:
741
+ # The abc caches and registered subclasses of a
742
+ # class are bundled into the single _abc_impl attribute
743
+ clsdict.pop("_abc_impl", None)
744
+ (registry, _, _, _) = abc._get_dump(obj)
745
+
746
+ clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
747
+ else:
748
+ # In the above if clause, registry is a set of weakrefs -- in
749
+ # this case, registry is a WeakSet
750
+ clsdict["_abc_impl"] = [type_ for type_ in registry]
751
+
752
+ if "__slots__" in clsdict:
753
+ # pickle string length optimization: member descriptors of obj are
754
+ # created automatically from obj's __slots__ attribute, no need to
755
+ # save them in obj's state
756
+ if isinstance(obj.__slots__, str):
757
+ clsdict.pop(obj.__slots__)
758
+ else:
759
+ for k in obj.__slots__:
760
+ clsdict.pop(k, None)
761
+
762
+ clsdict.pop("__dict__", None) # unpicklable property object
763
+
764
+ return (clsdict, {})
765
+
766
+
767
+ def _enum_getstate(obj):
768
+ clsdict, slotstate = _class_getstate(obj)
769
+
770
+ members = {e.name: e.value for e in obj}
771
+ # Cleanup the clsdict that will be passed to _make_skeleton_enum:
772
+ # Those attributes are already handled by the metaclass.
773
+ for attrname in [
774
+ "_generate_next_value_",
775
+ "_member_names_",
776
+ "_member_map_",
777
+ "_member_type_",
778
+ "_value2member_map_",
779
+ ]:
780
+ clsdict.pop(attrname, None)
781
+ for member in members:
782
+ clsdict.pop(member)
783
+ # Special handling of Enum subclasses
784
+ return clsdict, slotstate
785
+
786
+
787
+ # COLLECTIONS OF OBJECTS REDUCERS
788
+ # -------------------------------
789
+ # A reducer is a function taking a single argument (obj), and that returns a
790
+ # tuple with all the necessary data to re-construct obj. Apart from a few
791
+ # exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
792
+ # correctly pickle an object.
793
+ # While many built-in objects (Exceptions objects, instances of the "object"
794
+ # class, etc), are shipped with their own built-in reducer (invoked using
795
+ # obj.__reduce__), some do not. The following methods were created to "fill
796
+ # these holes".
797
+
798
+
799
+ def _code_reduce(obj):
800
+ """code object reducer."""
801
+ # If you are not sure about the order of arguments, take a look at help
802
+ # of the specific type from types, for example:
803
+ # >>> from types import CodeType
804
+ # >>> help(CodeType)
805
+ if hasattr(obj, "co_exceptiontable"):
806
+ # Python 3.11 and later: there are some new attributes
807
+ # related to the enhanced exceptions.
808
+ args = (
809
+ obj.co_argcount,
810
+ obj.co_posonlyargcount,
811
+ obj.co_kwonlyargcount,
812
+ obj.co_nlocals,
813
+ obj.co_stacksize,
814
+ obj.co_flags,
815
+ obj.co_code,
816
+ obj.co_consts,
817
+ obj.co_names,
818
+ obj.co_varnames,
819
+ obj.co_filename,
820
+ obj.co_name,
821
+ obj.co_qualname,
822
+ obj.co_firstlineno,
823
+ obj.co_linetable,
824
+ obj.co_exceptiontable,
825
+ obj.co_freevars,
826
+ obj.co_cellvars,
827
+ )
828
+ elif hasattr(obj, "co_linetable"):
829
+ # Python 3.10 and later: obj.co_lnotab is deprecated and constructor
830
+ # expects obj.co_linetable instead.
831
+ args = (
832
+ obj.co_argcount,
833
+ obj.co_posonlyargcount,
834
+ obj.co_kwonlyargcount,
835
+ obj.co_nlocals,
836
+ obj.co_stacksize,
837
+ obj.co_flags,
838
+ obj.co_code,
839
+ obj.co_consts,
840
+ obj.co_names,
841
+ obj.co_varnames,
842
+ obj.co_filename,
843
+ obj.co_name,
844
+ obj.co_firstlineno,
845
+ obj.co_linetable,
846
+ obj.co_freevars,
847
+ obj.co_cellvars,
848
+ )
849
+ elif hasattr(obj, "co_nmeta"): # pragma: no cover
850
+ # "nogil" Python: modified attributes from 3.9
851
+ args = (
852
+ obj.co_argcount,
853
+ obj.co_posonlyargcount,
854
+ obj.co_kwonlyargcount,
855
+ obj.co_nlocals,
856
+ obj.co_framesize,
857
+ obj.co_ndefaultargs,
858
+ obj.co_nmeta,
859
+ obj.co_flags,
860
+ obj.co_code,
861
+ obj.co_consts,
862
+ obj.co_varnames,
863
+ obj.co_filename,
864
+ obj.co_name,
865
+ obj.co_firstlineno,
866
+ obj.co_lnotab,
867
+ obj.co_exc_handlers,
868
+ obj.co_jump_table,
869
+ obj.co_freevars,
870
+ obj.co_cellvars,
871
+ obj.co_free2reg,
872
+ obj.co_cell2reg,
873
+ )
874
+ else:
875
+ # Backward compat for 3.8 and 3.9
876
+ args = (
877
+ obj.co_argcount,
878
+ obj.co_posonlyargcount,
879
+ obj.co_kwonlyargcount,
880
+ obj.co_nlocals,
881
+ obj.co_stacksize,
882
+ obj.co_flags,
883
+ obj.co_code,
884
+ obj.co_consts,
885
+ obj.co_names,
886
+ obj.co_varnames,
887
+ obj.co_filename,
888
+ obj.co_name,
889
+ obj.co_firstlineno,
890
+ obj.co_lnotab,
891
+ obj.co_freevars,
892
+ obj.co_cellvars,
893
+ )
894
+ return types.CodeType, args
895
+
896
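+ # Illustrative round-trip (a sketch; only meaningful on the same interpreter
+ # version, since the CodeType constructor signature differs across versions):
+ #
+ #   def f(x):
+ #       return x + 1
+ #   constructor, args = _code_reduce(f.__code__)
+ #   assert constructor(*args).co_code == f.__code__.co_code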
+
897
+ def _cell_reduce(obj):
898
+ """Cell (containing values of a function's free variables) reducer."""
899
+ try:
900
+ obj.cell_contents
901
+ except ValueError: # cell is empty
902
+ return _make_empty_cell, ()
903
+ else:
904
+ return _make_cell, (obj.cell_contents,)
905
+
906
+
907
+ def _classmethod_reduce(obj):
908
+ orig_func = obj.__func__
909
+ return type(obj), (orig_func,)
910
+
911
+
912
+ def _file_reduce(obj):
913
+ """Save a file."""
914
+ import io
915
+
916
+ if not hasattr(obj, "name") or not hasattr(obj, "mode"):
917
+ raise pickle.PicklingError(
918
+ "Cannot pickle files that do not map to an actual file"
919
+ )
920
+ if obj is sys.stdout:
921
+ return getattr, (sys, "stdout")
922
+ if obj is sys.stderr:
923
+ return getattr, (sys, "stderr")
924
+ if obj is sys.stdin:
925
+ raise pickle.PicklingError("Cannot pickle standard input")
926
+ if obj.closed:
927
+ raise pickle.PicklingError("Cannot pickle closed files")
928
+ if hasattr(obj, "isatty") and obj.isatty():
929
+ raise pickle.PicklingError("Cannot pickle files that map to tty objects")
930
+ if "r" not in obj.mode and "+" not in obj.mode:
931
+ raise pickle.PicklingError(
932
+ "Cannot pickle files that are not opened for reading: %s" % obj.mode
933
+ )
934
+
935
+ name = obj.name
936
+
937
+ retval = io.StringIO()
938
+
939
+ try:
940
+ # Read the whole file
941
+ curloc = obj.tell()
942
+ obj.seek(0)
943
+ contents = obj.read()
944
+ obj.seek(curloc)
945
+ except OSError as e:
946
+ raise pickle.PicklingError(
947
+ "Cannot pickle file %s as it cannot be read" % name
948
+ ) from e
949
+ retval.write(contents)
950
+ retval.seek(curloc)
951
+
952
+ retval.name = name
953
+ return _file_reconstructor, (retval,)
954
+
955
+
956
+ def _getset_descriptor_reduce(obj):
957
+ return getattr, (obj.__objclass__, obj.__name__)
958
+
959
+
960
+ def _mappingproxy_reduce(obj):
961
+ return types.MappingProxyType, (dict(obj),)
962
+
963
+
964
+ def _memoryview_reduce(obj):
965
+ return bytes, (obj.tobytes(),)
966
+
967
+
968
+ def _module_reduce(obj):
969
+ if _should_pickle_by_reference(obj):
970
+ return subimport, (obj.__name__,)
971
+ else:
972
+ # Some external libraries can populate the "__builtins__" entry of a
973
+ # module's `__dict__` with unpicklable objects (see #316). For that
974
+ # reason, we do not attempt to pickle the "__builtins__" entry, and
975
+ # restore a default value for it at unpickling time.
976
+ state = obj.__dict__.copy()
977
+ state.pop("__builtins__", None)
978
+ return dynamic_subimport, (obj.__name__, state)
979
+
980
+
981
+ def _method_reduce(obj):
982
+ return (types.MethodType, (obj.__func__, obj.__self__))
983
+
984
+
985
+ def _logger_reduce(obj):
986
+ return logging.getLogger, (obj.name,)
987
+
988
+
989
+ def _root_logger_reduce(obj):
990
+ return logging.getLogger, ()
991
+
992
+
993
+ def _property_reduce(obj):
994
+ return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
995
+
996
+
997
+ def _weakset_reduce(obj):
998
+ return weakref.WeakSet, (list(obj),)
999
+
1000
+
1001
+ def _dynamic_class_reduce(obj):
1002
+ """Save a class that can't be referenced as a module attribute.
1003
+
1004
+ This method is used to serialize classes that are defined inside
1005
+ functions, or that otherwise can't be serialized as attribute lookups
1006
+ from importable modules.
1007
+ """
1008
+ if Enum is not None and issubclass(obj, Enum):
1009
+ return (
1010
+ _make_skeleton_enum,
1011
+ _enum_getnewargs(obj),
1012
+ _enum_getstate(obj),
1013
+ None,
1014
+ None,
1015
+ _class_setstate,
1016
+ )
1017
+ else:
1018
+ return (
1019
+ _make_skeleton_class,
1020
+ _class_getnewargs(obj),
1021
+ _class_getstate(obj),
1022
+ None,
1023
+ None,
1024
+ _class_setstate,
1025
+ )
1026
+
1027
+
1028
+ def _class_reduce(obj):
1029
+ """Select the reducer depending on the dynamic nature of the class obj."""
1030
+ if obj is type(None): # noqa
1031
+ return type, (None,)
1032
+ elif obj is type(Ellipsis):
1033
+ return type, (Ellipsis,)
1034
+ elif obj is type(NotImplemented):
1035
+ return type, (NotImplemented,)
1036
+ elif obj in _BUILTIN_TYPE_NAMES:
1037
+ return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
1038
+ elif not _should_pickle_by_reference(obj):
1039
+ return _dynamic_class_reduce(obj)
1040
+ return NotImplemented
1041
+
1042
+
1043
+ def _dict_keys_reduce(obj):
1044
+ # Safer not to ship the full dict as sending the rest might
1045
+ # be unintended and could potentially cause leaking of
1046
+ # sensitive information
1047
+ return _make_dict_keys, (list(obj),)
1048
+
1049
+
1050
+ def _dict_values_reduce(obj):
1051
+ # Safer not to ship the full dict as sending the rest might
1052
+ # be unintended and could potentially cause leaking of
1053
+ # sensitive information
1054
+ return _make_dict_values, (list(obj),)
1055
+
1056
+
1057
+ def _dict_items_reduce(obj):
1058
+ return _make_dict_items, (dict(obj),)
1059
+
1060
+
1061
+ def _odict_keys_reduce(obj):
1062
+ # Safer not to ship the full dict as sending the rest might
1063
+ # be unintended and could potentially cause leaking of
1064
+ # sensitive information
1065
+ return _make_dict_keys, (list(obj), True)
1066
+
1067
+
1068
+ def _odict_values_reduce(obj):
1069
+ # Safer not to ship the full dict as sending the rest might
1070
+ # be unintended and could potentially cause leaking of
1071
+ # sensitive information
1072
+ return _make_dict_values, (list(obj), True)
1073
+
1074
+
1075
+ def _odict_items_reduce(obj):
1076
+ return _make_dict_items, (dict(obj), True)
1077
+
1078
+
1079
+ def _dataclass_field_base_reduce(obj):
1080
+ return _get_dataclass_field_type_sentinel, (obj.name,)
1081
+
1082
+
1083
+ # COLLECTIONS OF OBJECTS STATE SETTERS
1084
+ # ------------------------------------
1085
+ # state setters are called at unpickling time, once the object is created and
1086
+ # it has to be updated to how it was at pickling time.
1087
+
1088
+
1089
+ def _function_setstate(obj, state):
1090
+ """Update the state of a dynamic function.
1091
+
1092
+ As __closure__ and __globals__ are readonly attributes of a function, we
1093
+ cannot rely on the native setstate routine of pickle.load_build, which calls
1094
+ setattr on items of the slotstate. Instead, we have to modify them in place.
1095
+ """
1096
+ state, slotstate = state
1097
+ obj.__dict__.update(state)
1098
+
1099
+ obj_globals = slotstate.pop("__globals__")
1100
+ obj_closure = slotstate.pop("__closure__")
1101
+ # _cloudpickle_subimports is a set of submodules that must be loaded for
1102
+ # the pickled function to work correctly at unpickling time. Now that these
1103
+ # submodules are depickled (hence imported), they can be removed from the
1104
+ # object's state (the object state only served as a reference holder to
1105
+ # these submodules)
1106
+ slotstate.pop("_cloudpickle_submodules")
1107
+
1108
+ obj.__globals__.update(obj_globals)
1109
+ obj.__globals__["__builtins__"] = __builtins__
1110
+
1111
+ if obj_closure is not None:
1112
+ for i, cell in enumerate(obj_closure):
1113
+ try:
1114
+ value = cell.cell_contents
1115
+ except ValueError: # cell is empty
1116
+ continue
1117
+ obj.__closure__[i].cell_contents = value
1118
+
1119
+ for k, v in slotstate.items():
1120
+ setattr(obj, k, v)
1121
+
1122
+
1123
+ def _class_setstate(obj, state):
1124
+ state, slotstate = state
1125
+ registry = None
1126
+ for attrname, attr in state.items():
1127
+ if attrname == "_abc_impl":
1128
+ registry = attr
1129
+ else:
1130
+ setattr(obj, attrname, attr)
1131
+ if registry is not None:
1132
+ for subclass in registry:
1133
+ obj.register(subclass)
1134
+
1135
+ return obj
1136
+
1137
+
1138
+ # COLLECTION OF DATACLASS UTILITIES
1139
+ # ---------------------------------
1140
+ # There are some internal sentinel values whose identity must be preserved when
1141
+ # unpickling dataclass fields. Each sentinel value has a unique name that we can
1142
+ # use to retrieve its identity at unpickling time.
1143
+
1144
+
1145
+ _DATACLASSE_FIELD_TYPE_SENTINELS = {
1146
+ dataclasses._FIELD.name: dataclasses._FIELD,
1147
+ dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
1148
+ dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
1149
+ }
1150
+
1151
+
1152
+ def _get_dataclass_field_type_sentinel(name):
1153
+ return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
1154
+
1155
+
1156
+ class Pickler(pickle.Pickler):
1157
+ # set of reducers defined and used by cloudpickle (private)
1158
+ _dispatch_table = {}
1159
+ _dispatch_table[classmethod] = _classmethod_reduce
1160
+ _dispatch_table[io.TextIOWrapper] = _file_reduce
1161
+ _dispatch_table[logging.Logger] = _logger_reduce
1162
+ _dispatch_table[logging.RootLogger] = _root_logger_reduce
1163
+ _dispatch_table[memoryview] = _memoryview_reduce
1164
+ _dispatch_table[property] = _property_reduce
1165
+ _dispatch_table[staticmethod] = _classmethod_reduce
1166
+ _dispatch_table[CellType] = _cell_reduce
1167
+ _dispatch_table[types.CodeType] = _code_reduce
1168
+ _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
1169
+ _dispatch_table[types.ModuleType] = _module_reduce
1170
+ _dispatch_table[types.MethodType] = _method_reduce
1171
+ _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
1172
+ _dispatch_table[weakref.WeakSet] = _weakset_reduce
1173
+ _dispatch_table[typing.TypeVar] = _typevar_reduce
1174
+ _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
1175
+ _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
1176
+ _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
1177
+ _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
1178
+ _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
1179
+ _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
1180
+ _dispatch_table[abc.abstractmethod] = _classmethod_reduce
1181
+ _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
1182
+ _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
1183
+ _dispatch_table[abc.abstractproperty] = _property_reduce
1184
+ _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce
1185
+
1186
+ dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)
1187
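+ # Illustrative precedence (a sketch): because ChainMap searches
+ # _dispatch_table first, cloudpickle's reducers shadow any
+ # copyreg-registered ones for the same type, e.g.:
+ #
+ #   assert Pickler.dispatch_table[memoryview] is _memoryview_reduce  # after class creation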
+
1188
+ # function reducers are defined as instance methods of cloudpickle.Pickler
1189
+ # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref)
1190
+ def _dynamic_function_reduce(self, func):
1191
+ """Reduce a function that is not pickleable via attribute lookup."""
1192
+ newargs = self._function_getnewargs(func)
1193
+ state = _function_getstate(func)
1194
+ return (_make_function, newargs, state, None, None, _function_setstate)
1195
+
1196
+ def _function_reduce(self, obj):
1197
+ """Reducer for function objects.
1198
+
1199
+ If obj is a top-level attribute of a file-backed module, this reducer
1200
+ returns NotImplemented, making the cloudpickle.Pickler fall back to
1201
+ traditional pickle.Pickler routines to save obj. Otherwise, it reduces
1202
+ obj using a custom cloudpickle reducer designed specifically to handle
1203
+ dynamic functions.
1204
+ """
1205
+ if _should_pickle_by_reference(obj):
1206
+ return NotImplemented
1207
+ else:
1208
+ return self._dynamic_function_reduce(obj)
1209
+
1210
+ def _function_getnewargs(self, func):
1211
+ code = func.__code__
1212
+
1213
+ # base_globals represents the future global namespace of func at
1214
+ # unpickling time. Looking it up and storing it in
1215
+ # cloudpickle.Pickler.globals_ref allow functions sharing the same
1216
+ # globals at pickling time to also share them once unpickled, on one
1217
+ # condition: since globals_ref is an attribute of a cloudpickle.Pickler
1218
+ # instance, and a new cloudpickle.Pickler is created each time
1219
+ # cloudpickle.dump or cloudpickle.dumps is called, functions also need
1220
+ # to be saved within the same invocation of
1221
+ # cloudpickle.dump/cloudpickle.dumps (for example:
1222
+ # cloudpickle.dumps([f1, f2])). There is no such limitation when using
1223
+ # cloudpickle.Pickler.dump, as long as the multiple invocations are
1224
+ # bound to the same cloudpickle.Pickler instance.
1225
+ base_globals = self.globals_ref.setdefault(id(func.__globals__), {})
1226
+
1227
+ if base_globals == {}:
1228
+ # Add module attributes used to resolve relative imports
1229
+ # instructions inside func.
1230
+ for k in ["__package__", "__name__", "__path__", "__file__"]:
1231
+ if k in func.__globals__:
1232
+ base_globals[k] = func.__globals__[k]
1233
+
1234
+ # Do not bind the free variables before the function is created to
1235
+ # avoid infinite recursion.
1236
+ if func.__closure__ is None:
1237
+ closure = None
1238
+ else:
1239
+ closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars)))
1240
+
1241
+ return code, base_globals, None, None, closure
1242
+
1243
+ def dump(self, obj):
1244
+ try:
1245
+ return super().dump(obj)
1246
+ except RuntimeError as e:
1247
+ if len(e.args) > 0 and "recursion" in e.args[0]:
1248
+ msg = "Could not pickle object as excessively deep recursion required."
1249
+ raise pickle.PicklingError(msg) from e
1250
+ else:
1251
+ raise
1252
+
1253
+ def __init__(self, file, protocol=None, buffer_callback=None):
1254
+ if protocol is None:
1255
+ protocol = DEFAULT_PROTOCOL
1256
+ super().__init__(file, protocol=protocol, buffer_callback=buffer_callback)
1257
+ # map functions' __globals__ attribute ids to ensure that functions
1258
+ # sharing the same global namespace at pickling time also share
1259
+ # their global namespace at unpickling time.
1260
+ self.globals_ref = {}
1261
+ self.proto = int(protocol)
1262
+
1263
+ if not PYPY:
1264
+ # pickle.Pickler is the C implementation of the CPython pickler and
1265
+ # therefore we rely on reduce_override method to customize the pickler
1266
+ # behavior.
1267
+
1268
+ # `cloudpickle.Pickler.dispatch` is only left for backward
1269
+ # compatibility - note that when using protocol 5,
1270
+ # `cloudpickle.Pickler.dispatch` is not an extension of
1271
+ # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler`
1272
+ # subclasses the C-implemented `pickle.Pickler`, which does not expose
1273
+ # a `dispatch` attribute. Earlier versions of `cloudpickle.Pickler`
1274
+ # used `cloudpickle.Pickler.dispatch` as a class-level attribute
1275
+ # storing all reducers implemented by cloudpickle, but the attribute
1276
+ # name was not a great choice because it would collide with a
1277
+ # similarly named attribute in the pure-Python `pickle._Pickler`
1278
+ # implementation in the standard library.
1279
+ dispatch = dispatch_table
1280
+
1281
+ # Implementation of the reducer_override callback, in order to
1282
+ # efficiently serialize dynamic functions and classes by subclassing
1283
+ # the C-implemented `pickle.Pickler`.
1284
+ # TODO: decorrelate reducer_override (which is tied to CPython's
1285
+ # implementation - would it make sense to backport it to pypy?) and
1286
+ # pickle's protocol 5, which is implementation agnostic. Currently, the
1287
+ # availability of both notions coincide on CPython's pickle, but it may
1288
+ # not be the case anymore when pypy implements protocol 5.
1289
+
1290
+ def reducer_override(self, obj):
1291
+ """Type-agnostic reducing callback for function and classes.
1292
+
1293
+ For performance reasons, subclasses of the C `pickle.Pickler` class
1294
+ cannot register custom reducers for functions and classes in the
1295
+ dispatch_table attribute. Reducers for such types must instead
1296
+ be implemented via the special `reducer_override` method.
1297
+
1298
+ Note that this method will be called for any object except a few
1299
+ builtin-types (int, lists, dicts etc.), which differs from reducers
1300
+ in the Pickler's dispatch_table, each of them being invoked for
1301
+ objects of a specific type only.
1302
+
1303
+ This property comes in handy for classes: although most classes are
1304
+ instances of the ``type`` metaclass, some of them can be instances
1305
+ of other custom metaclasses (such as enum.EnumMeta for example). In
1306
+ particular, the metaclass will likely not be known in advance, and
1307
+ thus cannot be special-cased using an entry in the dispatch_table.
1308
+ reducer_override, among other things, allows us to register a
1309
+ reducer that will be called for any class, independently of its
1310
+ type.
1311
+
1312
+ Notes:
1313
+
1314
+ * reducer_override has the priority over dispatch_table-registered
1315
+ reducers.
1316
+ * reducer_override can be used to fix other limitations of
1317
+ cloudpickle for other types that suffered from type-specific
1318
+ reducers, such as Exceptions. See
1319
+ https://github.com/cloudpipe/cloudpickle/issues/248
1320
+ """
1321
+ t = type(obj)
1322
+ try:
1323
+ is_anyclass = issubclass(t, type)
1324
+ except TypeError: # t is not a class (old Boost; see SF #502085)
1325
+ is_anyclass = False
1326
+
1327
+ if is_anyclass:
1328
+ return _class_reduce(obj)
1329
+ elif isinstance(obj, types.FunctionType):
1330
+ return self._function_reduce(obj)
1331
+ else:
1332
+ # fallback to save_global, including the Pickler's
1333
+ # dispatch_table
1334
+ return NotImplemented
1335
+
1336
+ else:
1337
+ # When reducer_override is not available, hack the pure-Python
1338
+ # Pickler's types.FunctionType and type savers. Note: the type saver
1339
+ # must override Pickler.save_global, because pickle.py contains a
1340
+ # hard-coded call to save_global when pickling meta-classes.
1341
+ dispatch = pickle.Pickler.dispatch.copy()
1342
+
1343
+ def _save_reduce_pickle5(
1344
+ self,
1345
+ func,
1346
+ args,
1347
+ state=None,
1348
+ listitems=None,
1349
+ dictitems=None,
1350
+ state_setter=None,
1351
+ obj=None,
1352
+ ):
1353
+ save = self.save
1354
+ write = self.write
1355
+ self.save_reduce(
1356
+ func,
1357
+ args,
1358
+ state=None,
1359
+ listitems=listitems,
1360
+ dictitems=dictitems,
1361
+ obj=obj,
1362
+ )
1363
+ # backport of the Python 3.8 state_setter pickle operations
1364
+ save(state_setter)
1365
+ save(obj) # simple BINGET opcode as obj is already memoized.
1366
+ save(state)
1367
+ write(pickle.TUPLE2)
1368
+ # Trigger a state_setter(obj, state) function call.
1369
+ write(pickle.REDUCE)
1370
+ # The purpose of state_setter is to carry-out an
1371
+ # inplace modification of obj. We do not care about what the
1372
+ # method might return, so its output is eventually removed from
1373
+ # the stack.
1374
+ write(pickle.POP)
1375
+
1376
+ def save_global(self, obj, name=None, pack=struct.pack):
1377
+ """Main dispatch method.
1378
+
1379
+ The name of this method is somewhat misleading: all types get
1380
+ dispatched here.
1381
+ """
1382
+ if obj is type(None): # noqa
1383
+ return self.save_reduce(type, (None,), obj=obj)
1384
+ elif obj is type(Ellipsis):
1385
+ return self.save_reduce(type, (Ellipsis,), obj=obj)
1386
+ elif obj is type(NotImplemented):
1387
+ return self.save_reduce(type, (NotImplemented,), obj=obj)
1388
+ elif obj in _BUILTIN_TYPE_NAMES:
1389
+ return self.save_reduce(
1390
+ _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj
1391
+ )
1392
+
1393
+ if name is not None:
1394
+ super().save_global(obj, name=name)
1395
+ elif not _should_pickle_by_reference(obj, name=name):
1396
+ self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
1397
+ else:
1398
+ super().save_global(obj, name=name)
1399
+
1400
+ dispatch[type] = save_global
1401
+
1402
+ def save_function(self, obj, name=None):
1403
+ """Registered with the dispatch to handle all function types.
1404
+
1405
+ Determines what kind of function obj is (e.g. lambda, defined at
1406
+ interactive prompt, etc) and handles the pickling appropriately.
1407
+ """
1408
+ if _should_pickle_by_reference(obj, name=name):
1409
+ return super().save_global(obj, name=name)
1410
+ elif PYPY and isinstance(obj.__code__, builtin_code_type):
1411
+ return self.save_pypy_builtin_func(obj)
1412
+ else:
1413
+ return self._save_reduce_pickle5(
1414
+ *self._dynamic_function_reduce(obj), obj=obj
1415
+ )
1416
+
1417
+ def save_pypy_builtin_func(self, obj):
1418
+ """Save pypy equivalent of builtin functions.
1419
+
1420
+ PyPy does not have the concept of builtin-functions. Instead,
1421
+ builtin-functions are simple function instances, but with a
1422
+ builtin-code attribute.
1423
+ Most of the time, builtin functions should be pickled by attribute.
1424
+ But PyPy has flaky support for __qualname__, so some builtin
1425
+ functions such as float.__new__ will be classified as dynamic. For
1426
+ this reason only, we created this special routine. Because
1427
+ builtin-functions are not expected to have closure or globals,
1428
+ there is no additional hack (compared the one already implemented
1429
+ in pickle) to protect ourselves from reference cycles. A simple
1430
+ (reconstructor, newargs, obj.__dict__) tuple is passed to save_reduce. Note
1431
+ also that PyPy improved their support for __qualname__ in v3.6, so
1432
+ this routine should be removed when cloudpickle supports only PyPy
1433
+ 3.6 and later.
1434
+ """
1435
+ rv = (
1436
+ types.FunctionType,
1437
+ (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__),
1438
+ obj.__dict__,
1439
+ )
1440
+ self.save_reduce(*rv, obj=obj)
1441
+
1442
+ dispatch[types.FunctionType] = save_function
1443
+
1444
+
1445
+ # Shorthands similar to pickle.dump/pickle.dumps
1446
+
1447
+
1448
+ def dump(obj, file, protocol=None, buffer_callback=None):
1449
+ """Serialize obj as bytes streamed into file
1450
+
1451
+ protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
1452
+ pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
1453
+ speed between processes running the same Python version.
1454
+
1455
+ Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
1456
+ compatibility with older versions of Python (although this is not always
1457
+ guaranteed to work because cloudpickle relies on some internal
1458
+ implementation details that can change from one Python version to the
1459
+ next).
1460
+ """
1461
+ Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj)
1462
+
1463
+
1464
+ def dumps(obj, protocol=None, buffer_callback=None):
1465
+ """Serialize obj as a string of bytes allocated in memory
1466
+
1467
+ protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
1468
+ pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
1469
+ speed between processes running the same Python version.
1470
+
1471
+ Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
1472
+ compatibility with older versions of Python (although this is not always
1473
+ guaranteed to work because cloudpickle relies on some internal
1474
+ implementation details that can change from one Python version to the
1475
+ next).
1476
+ """
1477
+ with io.BytesIO() as file:
1478
+ cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback)
1479
+ cp.dump(obj)
1480
+ return file.getvalue()
1481
+
1482
+
1483
+ # Include pickles unloading functions in this namespace for convenience.
1484
+ load, loads = pickle.load, pickle.loads
1485
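+ # Illustrative round-trip (a sketch): a dynamically defined closure survives
+ # a dumps/loads cycle, which the standard pickle module cannot handle.
+ #
+ #   def make_adder(n):
+ #       return lambda x: x + n
+ #   add3 = loads(dumps(make_adder(3)))
+ #   assert add3(4) == 7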
+
1486
+ # Backward compat alias.
1487
+ CloudPickler = Pickler
.venv/lib/python3.11/site-packages/ray/cloudpickle/cloudpickle_fast.py ADDED
@@ -0,0 +1,13 @@
1
+ """Compatibility module.
2
+
3
+ It can be necessary to load files generated by previous versions of cloudpickle
4
+ that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
5
+ namespace.
6
+
7
+ See: tests/test_backward_compat.py
8
+ """
9
+ from . import cloudpickle
10
+
11
+
12
+ def __getattr__(name):
13
+ return getattr(cloudpickle, name)
.venv/lib/python3.11/site-packages/ray/cloudpickle/compat.py ADDED
@@ -0,0 +1,19 @@
1
+ import logging
2
+ import os
3
+
4
+ logger = logging.getLogger(__name__)
5
+
6
+ RAY_PICKLE_VERBOSE_DEBUG = os.environ.get("RAY_PICKLE_VERBOSE_DEBUG")
7
+ verbose_level = int(RAY_PICKLE_VERBOSE_DEBUG) if RAY_PICKLE_VERBOSE_DEBUG else 0
8
+
9
+ if verbose_level > 1:
10
+ logger.warning(
11
+ "Environmental variable RAY_PICKLE_VERBOSE_DEBUG is set to "
12
+ f"'{verbose_level}', this enabled python-based serialization backend "
13
+ f"instead of C-Pickle. Serialization would be very slow."
14
+ )
15
+ from ray.cloudpickle import py_pickle as pickle
16
+ from ray.cloudpickle.py_pickle import Pickler
17
+ else:
18
+ import pickle # noqa: F401
19
+ from _pickle import Pickler # noqa: F401
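+ # Illustrative activation (a sketch; the variable must be set before ray is
+ # imported, since this module reads it at import time):
+ #
+ #   RAY_PICKLE_VERBOSE_DEBUG=2 python my_script.py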
.venv/lib/python3.11/site-packages/ray/cloudpickle/py_pickle.py ADDED
@@ -0,0 +1,30 @@
1
+ from pickle import (
2
+ _Pickler,
3
+ _Unpickler as Unpickler,
4
+ _loads as loads,
5
+ _load as load,
6
+ PickleError,
7
+ PicklingError,
8
+ UnpicklingError,
9
+ HIGHEST_PROTOCOL,
10
+ )
11
+
12
+ __all__ = [
13
+ "PickleError",
14
+ "PicklingError",
15
+ "UnpicklingError",
16
+ "Pickler",
17
+ "Unpickler",
18
+ "load",
19
+ "loads",
20
+ "HIGHEST_PROTOCOL",
21
+ ]
22
+
23
+
24
+ class Pickler(_Pickler):
25
+ def __init__(self, file, protocol=None, *, fix_imports=True, buffer_callback=None):
26
+ super().__init__(
27
+ file, protocol, fix_imports=fix_imports, buffer_callback=buffer_callback
28
+ )
29
+ # avoid being overridden by cloudpickle
30
+ self.dispatch = _Pickler.dispatch.copy()
.venv/lib/python3.11/site-packages/ray/scripts/__init__.py ADDED
File without changes
.venv/lib/python3.11/site-packages/ray/scripts/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (184 Bytes).
.venv/lib/python3.11/site-packages/ray/scripts/scripts.py ADDED
@@ -0,0 +1,2695 @@
1
+ import copy
2
+ import json
3
+ import logging
4
+ import os
5
+ import platform
6
+ import signal
7
+ import subprocess
8
+ import sys
9
+ import time
10
+ import urllib
11
+ import urllib.parse
12
+ import warnings
13
+ import shutil
14
+ from datetime import datetime
15
+ from typing import Optional, Set, List, Tuple
16
+ from ray.dashboard.modules.metrics import install_and_start_prometheus
17
+ from ray.util.check_open_ports import check_open_ports
18
+ import requests
19
+
20
+ import click
21
+ import colorama
22
+ import psutil
23
+ import yaml
24
+
25
+ import ray
26
+ import ray._private.ray_constants as ray_constants
27
+ import ray._private.services as services
28
+ from ray._private.utils import (
29
+ check_ray_client_dependencies_installed,
30
+ parse_resources_json,
31
+ parse_node_labels_json,
32
+ )
33
+ from ray._private.internal_api import memory_summary
34
+ from ray._private.storage import _load_class
35
+ from ray._private.usage import usage_lib
36
+ from ray.autoscaler._private.cli_logger import add_click_logging_options, cf, cli_logger
37
+ from ray.autoscaler._private.commands import (
38
+ RUN_ENV_TYPES,
39
+ attach_cluster,
40
+ create_or_update_cluster,
41
+ debug_status,
42
+ exec_cluster,
43
+ get_cluster_dump_archive,
44
+ get_head_node_ip,
45
+ get_local_dump_archive,
46
+ get_worker_node_ips,
47
+ kill_node,
48
+ monitor_cluster,
49
+ rsync,
50
+ teardown_cluster,
51
+ )
52
+ from ray.autoscaler._private.constants import RAY_PROCESSES
53
+ from ray.autoscaler._private.fake_multi_node.node_provider import FAKE_HEAD_NODE_ID
54
+ from ray.util.annotations import PublicAPI
55
+ from ray.core.generated import autoscaler_pb2
56
+
57
+
58
+ logger = logging.getLogger(__name__)
59
+
60
+
61
+ def _check_ray_version(gcs_client):
62
+ import ray._private.usage.usage_lib as ray_usage_lib
63
+
64
+ cluster_metadata = ray_usage_lib.get_cluster_metadata(gcs_client)
65
+ if cluster_metadata and cluster_metadata["ray_version"] != ray.__version__:
66
+ raise RuntimeError(
67
+ "Ray version mismatch: cluster has Ray version "
68
+ f'{cluster_metadata["ray_version"]} '
69
+ f"but local Ray version is {ray.__version__}"
70
+ )
71
+
72
+
73
+ @click.group()
74
+ @click.option(
75
+ "--logging-level",
76
+ required=False,
77
+ default=ray_constants.LOGGER_LEVEL,
78
+ type=str,
79
+ help=ray_constants.LOGGER_LEVEL_HELP,
80
+ )
81
+ @click.option(
82
+ "--logging-format",
83
+ required=False,
84
+ default=ray_constants.LOGGER_FORMAT,
85
+ type=str,
86
+ help=ray_constants.LOGGER_FORMAT_HELP,
87
+ )
88
+ @click.version_option()
89
+ def cli(logging_level, logging_format):
90
+ level = logging.getLevelName(logging_level.upper())
91
+ ray._private.ray_logging.setup_logger(level, logging_format)
92
+ cli_logger.set_format(format_tmpl=logging_format)
93
+
94
+
95
+ @click.command()
96
+ @click.argument("cluster_config_file", required=True, type=str)
97
+ @click.option(
98
+ "--cluster-name",
99
+ "-n",
100
+ required=False,
101
+ type=str,
102
+ help="Override the configured cluster name.",
103
+ )
104
+ @click.option(
105
+ "--port",
106
+ "-p",
107
+ required=False,
108
+ type=int,
109
+ default=ray_constants.DEFAULT_DASHBOARD_PORT,
110
+ help="The local port to forward to the dashboard",
111
+ )
112
+ @click.option(
113
+ "--remote-port",
114
+ required=False,
115
+ type=int,
116
+ default=ray_constants.DEFAULT_DASHBOARD_PORT,
117
+ help="The remote port your dashboard runs on",
118
+ )
119
+ @click.option(
120
+ "--no-config-cache",
121
+ is_flag=True,
122
+ default=False,
123
+ help="Disable the local cluster config cache.",
124
+ )
125
+ @PublicAPI
126
+ def dashboard(cluster_config_file, cluster_name, port, remote_port, no_config_cache):
127
+ """Port-forward a Ray cluster's dashboard to the local machine."""
128
+ # Sleeping in a loop is preferable to `sleep infinity` because the latter
129
+ # only works on linux.
130
+ # Find the first open port sequentially from `remote_port`.
131
+ try:
132
+ port_forward = [
133
+ (port, remote_port),
134
+ ]
135
+ click.echo(
136
+ "Attempting to establish dashboard locally at"
137
+ " http://localhost:{}/ connected to"
138
+ " remote port {}".format(port, remote_port)
139
+ )
140
+ # We want to probe with a no-op that returns quickly to avoid
141
+ # exceptions caused by network errors.
+         exec_cluster(
+             cluster_config_file,
+             override_cluster_name=cluster_name,
+             port_forward=port_forward,
+             no_config_cache=no_config_cache,
+         )
+         click.echo("Successfully established connection.")
+     except Exception as e:
+         raise click.ClickException(
+             "Failed to forward dashboard from remote port {1} to local port "
+             "{0}. There are a couple of possibilities: \n 1. The remote port is "
+             "incorrectly specified \n 2. The local port {0} is already in "
+             "use.\n The exception is: {2}".format(port, remote_port, e)
+         ) from None
+
+
+ def continue_debug_session(live_jobs: Set[str]):
+     """Continue active debugging session.
+
+     This function will connect 'ray debug' to the right debugger
+     when a user is stepping between Ray tasks.
+     """
+     active_sessions = ray.experimental.internal_kv._internal_kv_list(
+         "RAY_PDB_", namespace=ray_constants.KV_NAMESPACE_PDB
+     )
+
+     for active_session in active_sessions:
+         if active_session.startswith(b"RAY_PDB_CONTINUE"):
+             # Check to see that the relevant job is still alive.
+             data = ray.experimental.internal_kv._internal_kv_get(
+                 active_session, namespace=ray_constants.KV_NAMESPACE_PDB
+             )
+             if json.loads(data)["job_id"] not in live_jobs:
+                 ray.experimental.internal_kv._internal_kv_del(
+                     active_session, namespace=ray_constants.KV_NAMESPACE_PDB
+                 )
+                 continue
+
+             print("Continuing pdb session in different process...")
+             key = b"RAY_PDB_" + active_session[len("RAY_PDB_CONTINUE_") :]
+             while True:
+                 data = ray.experimental.internal_kv._internal_kv_get(
+                     key, namespace=ray_constants.KV_NAMESPACE_PDB
+                 )
+                 if data:
+                     session = json.loads(data)
+                     if "exit_debugger" in session or session["job_id"] not in live_jobs:
+                         ray.experimental.internal_kv._internal_kv_del(
+                             key, namespace=ray_constants.KV_NAMESPACE_PDB
+                         )
+                         return
+                     host, port = session["pdb_address"].split(":")
+                     ray.util.rpdb._connect_pdb_client(host, int(port))
+                     ray.experimental.internal_kv._internal_kv_del(
+                         key, namespace=ray_constants.KV_NAMESPACE_PDB
+                     )
+                     continue_debug_session(live_jobs)
+                     return
+                 time.sleep(1.0)
+
+
+ def none_to_empty(s):
+     if s is None:
+         return ""
+     return s
+
+
+ def format_table(table):
+     """Format a table as a list of lines with aligned columns."""
+     result = []
+     col_width = [max(len(x) for x in col) for col in zip(*table)]
+     for line in table:
+         result.append(
+             " | ".join("{0:{1}}".format(x, col_width[i]) for i, x in enumerate(line))
+         )
+     return result
+
+
+ @cli.command()
+ @click.option(
+     "--address", required=False, type=str, help="Override the address to connect to."
+ )
+ @click.option(
+     "-v",
+     "--verbose",
+     required=False,
+     is_flag=True,
+     help="Show additional fields in the breakpoint selection page.",
+ )
+ def debug(address: str, verbose: bool):
+     """Show all active breakpoints and exceptions in the Ray debugger."""
+     address = services.canonicalize_bootstrap_address_or_die(address)
+     logger.info(f"Connecting to Ray instance at {address}.")
+     ray.init(address=address, log_to_driver=False)
+     if os.environ.get("RAY_DEBUG", "1") != "legacy":
+         print(
+             f"{colorama.Fore.YELLOW}NOTE: The distributed debugger "
+             "https://docs.ray.io/en/latest/ray-observability"
+             "/ray-distributed-debugger.html is now the default "
+             "due to better interactive debugging support. If you want "
+             "to keep using 'ray debug' please set RAY_DEBUG=legacy "
+             f"in your cluster (e.g. via runtime environment).{colorama.Fore.RESET}"
+         )
+     while True:
+         # Used to filter out and clean up entries from dead jobs.
+         live_jobs = {
+             job["JobID"] for job in ray._private.state.jobs() if not job["IsDead"]
+         }
+         continue_debug_session(live_jobs)
+
+         active_sessions = ray.experimental.internal_kv._internal_kv_list(
+             "RAY_PDB_", namespace=ray_constants.KV_NAMESPACE_PDB
+         )
+         print("Active breakpoints:")
+         sessions_data = []
+         for active_session in active_sessions:
+             data = json.loads(
+                 ray.experimental.internal_kv._internal_kv_get(
+                     active_session, namespace=ray_constants.KV_NAMESPACE_PDB
+                 )
+             )
+             # Check that the relevant job is alive, else clean up the entry.
+             if data["job_id"] in live_jobs:
+                 sessions_data.append(data)
+             else:
+                 ray.experimental.internal_kv._internal_kv_del(
+                     active_session, namespace=ray_constants.KV_NAMESPACE_PDB
+                 )
+         sessions_data = sorted(
+             sessions_data, key=lambda data: data["timestamp"], reverse=True
+         )
+         if verbose:
+             table = [
+                 [
+                     "index",
+                     "timestamp",
+                     "Ray task",
+                     "filename:lineno",
+                     "Task ID",
+                     "Worker ID",
+                     "Actor ID",
+                     "Node ID",
+                 ]
+             ]
+             for i, data in enumerate(sessions_data):
+                 date = datetime.utcfromtimestamp(data["timestamp"]).strftime(
+                     "%Y-%m-%d %H:%M:%S"
+                 )
+                 table.append(
+                     [
+                         str(i),
+                         date,
+                         data["proctitle"],
+                         data["filename"] + ":" + str(data["lineno"]),
+                         data["task_id"],
+                         data["worker_id"],
+                         none_to_empty(data["actor_id"]),
+                         data["node_id"],
+                     ]
+                 )
+         else:
+             # Non-verbose mode: no IDs.
+             table = [["index", "timestamp", "Ray task", "filename:lineno"]]
+             for i, data in enumerate(sessions_data):
+                 date = datetime.utcfromtimestamp(data["timestamp"]).strftime(
+                     "%Y-%m-%d %H:%M:%S"
+                 )
+                 table.append(
+                     [
+                         str(i),
+                         date,
+                         data["proctitle"],
+                         data["filename"] + ":" + str(data["lineno"]),
+                     ]
+                 )
+         for i, line in enumerate(format_table(table)):
+             print(line)
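+             # Row 0 is the table header, so printed row i corresponds to
+             # sessions_data[i - 1]; a traceback of "NoneType: None" means no
+             # exception was recorded for that breakpoint.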
+             if i >= 1 and not sessions_data[i - 1]["traceback"].startswith(
+                 "NoneType: None"
+             ):
+                 print(sessions_data[i - 1]["traceback"])
+         inp = input("Enter breakpoint index or press enter to refresh: ")
+         if inp == "":
+             print()
+             continue
+         else:
+             index = int(inp)
+             session = json.loads(
+                 ray.experimental.internal_kv._internal_kv_get(
+                     active_sessions[index], namespace=ray_constants.KV_NAMESPACE_PDB
+                 )
+             )
+             host, port = session["pdb_address"].split(":")
+             ray.util.rpdb._connect_pdb_client(host, int(port))
+
+
+ @cli.command()
+ @click.option(
+     "--node-ip-address", required=False, type=str, help="the IP address of this node"
+ )
+ @click.option("--address", required=False, type=str, help="the address to use for Ray")
+ @click.option(
+     "--port",
+     type=int,
+     required=False,
+     help=f"the port of the head ray process. If not provided, defaults to "
+     f"{ray_constants.DEFAULT_PORT}; if port is set to 0, we will"
+     f" allocate an available port.",
+ )
+ @click.option(
+     "--node-name",
+     required=False,
+     hidden=True,
+     type=str,
+     help="the user-provided identifier or name for this node. "
+     "Defaults to the node's ip_address",
+ )
+ @click.option(
+     "--redis-username",
+     required=False,
+     hidden=True,
+     type=str,
+     default=ray_constants.REDIS_DEFAULT_USERNAME,
+     help="If provided, secure Redis ports with this username",
+ )
+ @click.option(
+     "--redis-password",
+     required=False,
+     hidden=True,
+     type=str,
+     default=ray_constants.REDIS_DEFAULT_PASSWORD,
+     help="If provided, secure Redis ports with this password",
+ )
+ @click.option(
+     "--redis-shard-ports",
+     required=False,
+     hidden=True,
+     type=str,
+     help="the port to use for the Redis shards other than the primary Redis shard",
+ )
+ @click.option(
+     "--object-manager-port",
+     required=False,
+     type=int,
+     help="the port to use for starting the object manager",
+ )
+ @click.option(
+     "--node-manager-port",
+     required=False,
+     type=int,
+     default=0,
+     help="the port to use for starting the node manager",
+ )
+ @click.option(
+     "--gcs-server-port",
+     required=False,
+     type=int,
+     help="Port number for the GCS server.",
+ )
+ )
401
+ @click.option(
402
+ "--min-worker-port",
403
+ required=False,
404
+ type=int,
405
+ default=10002,
406
+ help="the lowest port number that workers will bind on. If not set, "
407
+ "random ports will be chosen.",
408
+ )
409
+ @click.option(
410
+ "--max-worker-port",
411
+ required=False,
412
+ type=int,
413
+ default=19999,
414
+ help="the highest port number that workers will bind on. If set, "
415
+ "'--min-worker-port' must also be set.",
416
+ )
417
+ @click.option(
418
+ "--worker-port-list",
419
+ required=False,
420
+ help="a comma-separated list of open ports for workers to bind on. "
421
+ "Overrides '--min-worker-port' and '--max-worker-port'.",
422
+ )
423
+ @click.option(
424
+ "--ray-client-server-port",
425
+ required=False,
426
+ type=int,
427
+ default=None,
428
+ help="the port number the ray client server binds on, default to 10001, "
429
+ "or None if ray[client] is not installed.",
430
+ )
431
+ @click.option(
432
+ "--memory",
433
+ required=False,
434
+ hidden=True,
435
+ type=int,
436
+ help="The amount of memory (in bytes) to make available to workers. "
437
+ "By default, this is set to the available memory on the node.",
438
+ )
439
+ @click.option(
440
+ "--object-store-memory",
441
+ required=False,
442
+ type=int,
443
+ help="The amount of memory (in bytes) to start the object store with. "
444
+ "By default, this is 30% (ray_constants.DEFAULT_OBJECT_STORE_MEMORY_PROPORTION) "
445
+ "of available system memory capped by "
446
+ "the shm size and 200G (ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES) "
447
+ "but can be set higher.",
448
+ )
449
+ @click.option(
450
+ "--redis-max-memory",
451
+ required=False,
452
+ hidden=True,
453
+ type=int,
454
+ help="The max amount of memory (in bytes) to allow redis to use. Once the "
455
+ "limit is exceeded, redis will start LRU eviction of entries. This only "
456
+ "applies to the sharded redis tables (task, object, and profile tables). "
457
+ "By default this is capped at 10GB but can be set higher.",
458
+ )
459
+ @click.option(
460
+ "--num-cpus", required=False, type=int, help="the number of CPUs on this node"
461
+ )
462
+ @click.option(
463
+ "--num-gpus", required=False, type=int, help="the number of GPUs on this node"
464
+ )
465
+ @click.option(
466
+ "--resources",
467
+ required=False,
468
+ default="{}",
469
+ type=str,
470
+ help="A JSON serialized dictionary mapping resource name to resource quantity."
471
+ + (
472
+ r"""
473
+
474
+ Windows command prompt users must ensure to double quote command line arguments. Because
475
+ JSON requires the use of double quotes you must escape these arguments as well, for
476
+ example:
477
+
478
+ ray start --head --resources="{\"special_hardware\":1, \"custom_label\":1}"
479
+
480
+ Windows powershell users need additional escaping:
481
+
482
+ ray start --head --resources="{\""special_hardware\"":1, \""custom_label\"":1}"
483
+ """
484
+ if platform.system() == "Windows"
485
+ else ""
486
+ ),
487
+ )
488
+ @click.option(
+     "--head",
+     is_flag=True,
+     default=False,
+     help="provide this argument for the head node",
+ )
+ @click.option(
+     "--include-dashboard",
+     default=None,
+     type=bool,
+     help="provide this argument to start the Ray dashboard GUI",
+ )
+ @click.option(
+     "--dashboard-host",
+     required=False,
+     default=ray_constants.DEFAULT_DASHBOARD_IP,
+     help="the host to bind the dashboard server to, either localhost "
+     "(127.0.0.1) or 0.0.0.0 (available from all interfaces). By default, this "
+     "is 127.0.0.1",
+ )
+ @click.option(
+     "--dashboard-port",
+     required=False,
+     type=int,
+     default=ray_constants.DEFAULT_DASHBOARD_PORT,
+     help="the port to bind the dashboard server to; defaults to {}".format(
+         ray_constants.DEFAULT_DASHBOARD_PORT
+     ),
+ )
+ @click.option(
+     "--dashboard-agent-listen-port",
+     type=int,
+     default=ray_constants.DEFAULT_DASHBOARD_AGENT_LISTEN_PORT,
+     help="the port for dashboard agents to listen for http on.",
+ )
+ @click.option(
+     "--dashboard-agent-grpc-port",
+     type=int,
+     default=None,
+     help="the port for dashboard agents to listen for grpc on.",
+ )
+ @click.option(
+     "--dashboard-grpc-port",
+     type=int,
+     default=None,
+     help="The port for the dashboard head to listen for grpc on.",
+ )
+ @click.option(
+     "--runtime-env-agent-port",
+     type=int,
+     default=None,
+     help="The port for the runtime environment agents to listen for http on.",
+ )
+ @click.option(
+     "--block",
+     is_flag=True,
+     default=False,
+     help="provide this argument to block forever in this command",
+ )
+ @click.option(
+     "--plasma-directory",
+     required=False,
+     type=str,
+     help="object store directory for memory mapped files",
+ )
+ @click.option(
+     "--autoscaling-config",
+     required=False,
+     type=str,
+     help="the file that contains the autoscaling config",
+ )
+ @click.option(
+     "--no-redirect-output",
+     is_flag=True,
+     default=False,
+     help="do not redirect non-worker stdout and stderr to files",
+ )
+ @click.option(
+     "--plasma-store-socket-name",
+     default=None,
+     help="manually specify the socket name of the plasma store",
+ )
+ @click.option(
+     "--raylet-socket-name",
+     default=None,
+     help="manually specify the socket path of the raylet process",
+ )
+ @click.option(
+     "--temp-dir",
+     default=None,
+     help="manually specify the root temporary dir of the Ray process, only "
+     "works when --head is specified",
+ )
+ @click.option(
+     "--storage",
+     default=None,
+     help="the persistent storage URI for the cluster. Experimental.",
+ )
+ @click.option(
+     "--system-config",
+     default=None,
+     hidden=True,
+     type=json.loads,
+     help="Override system configuration defaults.",
+ )
+ @click.option(
+     "--enable-object-reconstruction",
+     is_flag=True,
+     default=False,
+     hidden=True,
+     help="Specify whether object reconstruction will be used for this cluster.",
+ )
+ @click.option(
+     "--metrics-export-port",
+     type=int,
+     default=None,
+     help="the port to use to expose Ray metrics through a Prometheus endpoint.",
+ )
+ @click.option(
+     "--no-monitor",
+     is_flag=True,
+     hidden=True,
+     default=False,
+     help="If True, the ray autoscaler monitor for this cluster will not be started.",
+ )
+ @click.option(
+     "--tracing-startup-hook",
+     type=str,
+     hidden=True,
+     default=None,
+     help="The function that sets up tracing with a tracing provider, remote "
+     "span processor, and additional instruments. See docs.ray.io/tracing.html "
+     "for more info.",
+ )
+ @click.option(
+     "--ray-debugger-external",
+     is_flag=True,
+     default=False,
+     help="Make the Ray debugger available externally to the node. This is only "
+     "safe to activate if the node is behind a firewall.",
+ )
+ @click.option(
+     "--disable-usage-stats",
+     is_flag=True,
+     default=False,
+     help="If True, the usage stats collection will be disabled.",
+ )
+ @click.option(
+     "--labels",
+     required=False,
+     hidden=True,
+     default="{}",
+     type=str,
+     help="a JSON serialized dictionary mapping label name to label value.",
+ )
+ @click.option(
+     "--include-log-monitor",
+     default=None,
+     type=bool,
+     help="If set to True or left unset, a log monitor will start monitoring "
+     "the log files of all processes on this node and push their contents to GCS. "
+     "Only one log monitor should be started per physical host to avoid log "
+     "duplication on the driver process.",
+ )
+ @add_click_logging_options
+ @PublicAPI
+ def start(
+     node_ip_address,
+     address,
+     port,
+     node_name,
+     redis_username,
+     redis_password,
+     redis_shard_ports,
+     object_manager_port,
+     node_manager_port,
+     gcs_server_port,
+     min_worker_port,
+     max_worker_port,
+     worker_port_list,
+     ray_client_server_port,
+     memory,
+     object_store_memory,
+     redis_max_memory,
+     num_cpus,
+     num_gpus,
+     resources,
+     head,
+     include_dashboard,
+     dashboard_host,
+     dashboard_port,
+     dashboard_agent_listen_port,
+     dashboard_agent_grpc_port,
+     dashboard_grpc_port,
+     runtime_env_agent_port,
+     block,
+     plasma_directory,
+     autoscaling_config,
+     no_redirect_output,
+     plasma_store_socket_name,
+     raylet_socket_name,
+     temp_dir,
+     storage,
+     system_config,
+     enable_object_reconstruction,
+     metrics_export_port,
+     no_monitor,
+     tracing_startup_hook,
+     ray_debugger_external,
+     disable_usage_stats,
+     labels,
+     include_log_monitor,
+ ):
+ """Start Ray processes manually on the local machine."""
702
+ # TODO(hjiang): Expose physical mode interface to ray cluster start command after
703
+ # all features implemented.
704
+
705
+ if gcs_server_port is not None:
706
+ cli_logger.error(
707
+ "`{}` is deprecated and ignored. Use {} to specify "
708
+ "GCS server port on head node.",
709
+ cf.bold("--gcs-server-port"),
710
+ cf.bold("--port"),
711
+ )
712
+ # Whether the original arguments include node_ip_address.
713
+ include_node_ip_address = False
714
+ if node_ip_address is not None:
715
+ include_node_ip_address = True
716
+ node_ip_address = services.resolve_ip_for_localhost(node_ip_address)
717
+
718
+ resources = parse_resources_json(resources, cli_logger, cf)
719
+ labels_dict = parse_node_labels_json(labels, cli_logger, cf)
720
+
721
+ if plasma_store_socket_name is not None:
722
+ warnings.warn(
723
+ "plasma_store_socket_name is deprecated and will be removed. You are not "
724
+ "supposed to specify this parameter as it's internal.",
725
+ DeprecationWarning,
726
+ stacklevel=2,
727
+ )
728
+ if raylet_socket_name is not None:
729
+ warnings.warn(
730
+ "raylet_socket_name is deprecated and will be removed. You are not "
731
+ "supposed to specify this parameter as it's internal.",
732
+ DeprecationWarning,
733
+ stacklevel=2,
734
+ )
735
+ if temp_dir and not head:
736
+ cli_logger.warning(
737
+ f"`--temp-dir={temp_dir}` option will be ignored. "
738
+ "`--head` is a required flag to use `--temp-dir`. "
739
+ "temp_dir is only configurable from a head node. "
740
+ "All the worker nodes will use the same temp_dir as a head node. "
741
+ )
742
+ temp_dir = None
743
+
744
+ redirect_output = None if not no_redirect_output else True
745
+
746
+ # no client, no port -> ok
747
+ # no port, has client -> default to 10001
748
+ # has port, no client -> value error
749
+ # has port, has client -> ok, check port validity
750
+ has_ray_client = check_ray_client_dependencies_installed()
751
+ if has_ray_client and ray_client_server_port is None:
752
+ ray_client_server_port = 10001
753
+
754
+ ray_params = ray._private.parameter.RayParams(
755
+ node_ip_address=node_ip_address,
756
+ node_name=node_name if node_name else node_ip_address,
757
+ min_worker_port=min_worker_port,
758
+ max_worker_port=max_worker_port,
759
+ worker_port_list=worker_port_list,
760
+ ray_client_server_port=ray_client_server_port,
761
+ object_manager_port=object_manager_port,
762
+ node_manager_port=node_manager_port,
763
+ memory=memory,
764
+ object_store_memory=object_store_memory,
765
+ redis_username=redis_username,
766
+ redis_password=redis_password,
767
+ redirect_output=redirect_output,
768
+ num_cpus=num_cpus,
769
+ num_gpus=num_gpus,
770
+ resources=resources,
771
+ labels=labels_dict,
772
+ autoscaling_config=autoscaling_config,
773
+ plasma_directory=plasma_directory,
774
+ huge_pages=False,
775
+ plasma_store_socket_name=plasma_store_socket_name,
776
+ raylet_socket_name=raylet_socket_name,
777
+ temp_dir=temp_dir,
778
+ storage=storage,
779
+ include_dashboard=include_dashboard,
780
+ dashboard_host=dashboard_host,
781
+ dashboard_port=dashboard_port,
782
+ dashboard_agent_listen_port=dashboard_agent_listen_port,
783
+ metrics_agent_port=dashboard_agent_grpc_port,
784
+ dashboard_grpc_port=dashboard_grpc_port,
785
+ runtime_env_agent_port=runtime_env_agent_port,
786
+ _system_config=system_config,
787
+ enable_object_reconstruction=enable_object_reconstruction,
788
+ metrics_export_port=metrics_export_port,
789
+ no_monitor=no_monitor,
790
+ tracing_startup_hook=tracing_startup_hook,
791
+ ray_debugger_external=ray_debugger_external,
792
+ enable_physical_mode=False,
793
+ include_log_monitor=include_log_monitor,
794
+ )
795
+
796
+ if ray_constants.RAY_START_HOOK in os.environ:
797
+ _load_class(os.environ[ray_constants.RAY_START_HOOK])(ray_params, head)
798
+
799
+ if head:
800
+ # Start head node.
801
+
802
+ if disable_usage_stats:
803
+ usage_lib.set_usage_stats_enabled_via_env_var(False)
804
+ usage_lib.show_usage_stats_prompt(cli=True)
805
+ cli_logger.newline()
806
+
807
+ if port is None:
808
+ port = ray_constants.DEFAULT_PORT
809
+
810
+ # Set bootstrap port.
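+         # The GCS server doubles as the bootstrap endpoint, so `--port` becomes
+         # the GCS server port.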
+         assert ray_params.redis_port is None
+         assert ray_params.gcs_server_port is None
+         ray_params.gcs_server_port = port
+
+         if os.environ.get("RAY_FAKE_CLUSTER"):
+             ray_params.env_vars = {
+                 "RAY_OVERRIDE_NODE_ID_FOR_TESTING": FAKE_HEAD_NODE_ID
+             }
+
+         num_redis_shards = None
+         # Start Ray on the head node.
+         if redis_shard_ports is not None and address is None:
+             redis_shard_ports = redis_shard_ports.split(",")
+             # Infer the number of Redis shards from the ports if the number is
+             # not provided.
+             num_redis_shards = len(redis_shard_ports)
+
+         # This logic is deprecated and will be removed later.
+         if address is not None:
+             cli_logger.warning(
+                 "Specifying {} for external Redis address is deprecated. "
+                 "Please specify environment variable {}={} instead.",
+                 cf.bold("--address"),
+                 cf.bold("RAY_REDIS_ADDRESS"),
+                 address,
+             )
+             external_addresses = address.split(",")
+
+             # We reuse the primary Redis instance as a shard when there's only
+             # one instance provided.
+             if len(external_addresses) == 1:
+                 external_addresses.append(external_addresses[0])
+
+             ray_params.update_if_absent(external_addresses=external_addresses)
+             num_redis_shards = len(external_addresses) - 1
+             if redis_username == ray_constants.REDIS_DEFAULT_USERNAME:
+                 cli_logger.warning(
+                     "`{}` should not be specified as an empty string if "
+                     "the external Redis server(s) specified by `{}` "
+                     "require a username.",
+                     cf.bold("--redis-username"),
+                     cf.bold("--address"),
+                 )
+             if redis_password == ray_constants.REDIS_DEFAULT_PASSWORD:
+                 cli_logger.warning(
+                     "`{}` should not be specified as an empty string if "
+                     "the external Redis server(s) specified by `{}` "
+                     "require a password.",
+                     cf.bold("--redis-password"),
+                     cf.bold("--address"),
+                 )
+
+         # Get the node IP address if one is not provided.
+         ray_params.update_if_absent(node_ip_address=services.get_node_ip_address())
+         cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
+
+         # Initialize Redis settings.
+         ray_params.update_if_absent(
+             redis_shard_ports=redis_shard_ports,
+             redis_max_memory=redis_max_memory,
+             num_redis_shards=num_redis_shards,
+             redis_max_clients=None,
+         )
+
+         # Fail early when starting a new cluster while one is already running.
+         if address is None:
+             default_address = f"{ray_params.node_ip_address}:{port}"
+             bootstrap_address = services.find_bootstrap_address(temp_dir)
+             if (
+                 default_address == bootstrap_address
+                 and bootstrap_address in services.find_gcs_addresses()
+             ):
+                 # The default address is already in use by a local running GCS
+                 # instance.
+                 raise ConnectionError(
+                     f"Ray is trying to start at {default_address}, "
+                     f"but is already running at {bootstrap_address}. "
+                     "Please specify a different port using the `--port`"
+                     " flag of the `ray start` command."
+                 )
+
+         node = ray._private.node.Node(
+             ray_params, head=True, shutdown_at_exit=block, spawn_reaper=block
+         )
+
+         bootstrap_address = node.address
+
+         # This is a no-op if new-style logging is not set, so the old logger
+         # calls are still in place.
+         cli_logger.newline()
+         startup_msg = "Ray runtime started."
+         cli_logger.success("-" * len(startup_msg))
+         cli_logger.success(startup_msg)
+         cli_logger.success("-" * len(startup_msg))
+         cli_logger.newline()
+         with cli_logger.group("Next steps"):
+             dashboard_url = node.address_info["webui_url"]
+             if ray_constants.ENABLE_RAY_CLUSTER:
+                 cli_logger.print("To add another node to this Ray cluster, run")
+                 # NOTE(kfstorm): Java drivers rely on this line to get the address
+                 # of the cluster. Please be careful when updating this line.
+                 cli_logger.print(
+                     cf.bold(" {} ray start --address='{}'"),
+                     f" {ray_constants.ENABLE_RAY_CLUSTERS_ENV_VAR}=1"
+                     if ray_constants.IS_WINDOWS_OR_OSX
+                     else "",
+                     bootstrap_address,
+                 )
+
+             cli_logger.newline()
+             cli_logger.print("To connect to this Ray cluster:")
+             with cli_logger.indented():
+                 cli_logger.print("{} ray", cf.magenta("import"))
+                 cli_logger.print(
+                     "ray{}init({})",
+                     cf.magenta("."),
+                     "_node_ip_address{}{}".format(
+                         cf.magenta("="), cf.yellow("'" + node_ip_address + "'")
+                     )
+                     if include_node_ip_address
+                     else "",
+                 )
+
+             if dashboard_url:
+                 cli_logger.newline()
+                 cli_logger.print("To submit a Ray job using the Ray Jobs CLI:")
+                 cli_logger.print(
+                     cf.bold(
+                         " RAY_ADDRESS='http://{}' ray job submit "
+                         "--working-dir . "
+                         "-- python my_script.py"
+                     ),
+                     dashboard_url,
+                 )
+                 cli_logger.newline()
+                 cli_logger.print(
+                     "See https://docs.ray.io/en/latest/cluster/running-applications"
+                     "/job-submission/index.html "
+                 )
+                 cli_logger.print(
+                     "for more information on submitting Ray jobs to the Ray cluster."
+                 )
+
+             cli_logger.newline()
+             cli_logger.print("To terminate the Ray runtime, run")
+             cli_logger.print(cf.bold(" ray stop"))
+
+             cli_logger.newline()
+             cli_logger.print("To view the status of the cluster, use")
+             cli_logger.print(" {}".format(cf.bold("ray status")))
+
+             if dashboard_url:
+                 cli_logger.newline()
+                 cli_logger.print("To monitor and debug Ray, view the dashboard at ")
+                 cli_logger.print(
+                     " {}".format(
+                         cf.bold(dashboard_url),
+                     )
+                 )
+
+                 cli_logger.newline()
+                 cli_logger.print(
+                     cf.underlined(
+                         "If connection to the dashboard fails, check your "
+                         "firewall settings and "
+                         "network configuration."
+                     )
+                 )
+         ray_params.gcs_address = bootstrap_address
+     else:
+         # Start worker node.
+         if not ray_constants.ENABLE_RAY_CLUSTER:
+             cli_logger.abort(
+                 "Multi-node Ray clusters are not supported on Windows and OSX. "
+                 "Restart the Ray cluster with the environment variable `{}=1` "
+                 "to proceed anyway.",
+                 cf.bold(ray_constants.ENABLE_RAY_CLUSTERS_ENV_VAR),
+             )
+             raise Exception(
+                 "Multi-node Ray clusters are not supported on Windows and OSX. "
+                 "Restart the Ray cluster with the environment variable "
+                 f"`{ray_constants.ENABLE_RAY_CLUSTERS_ENV_VAR}=1` to proceed "
+                 "anyway.",
+             )
+
+         # Ensure the `--address` flag is specified.
+         if address is None:
+             cli_logger.abort(
+                 "`{}` is a required flag unless starting a head node with `{}`.",
+                 cf.bold("--address"),
+                 cf.bold("--head"),
+             )
+             raise Exception(
+                 "`--address` is a required flag unless starting a "
+                 "head node with `--head`."
+             )
+
+         # Raise an error if any head-only flags are specified.
+         head_only_flags = {
+             "--port": port,
+             "--redis-shard-ports": redis_shard_ports,
+             "--include-dashboard": include_dashboard,
+         }
+         for flag, val in head_only_flags.items():
+             if val is None:
+                 continue
+             cli_logger.abort(
+                 "`{}` should only be specified when starting head node with `{}`.",
+                 cf.bold(flag),
+                 cf.bold("--head"),
+             )
+             raise ValueError(
+                 f"{flag} should only be specified when starting head node "
+                 "with `--head`."
+             )
+
+         # Start Ray on a non-head node.
+         bootstrap_address = services.canonicalize_bootstrap_address(
+             address, temp_dir=temp_dir
+         )
+
+         if bootstrap_address is None:
+             cli_logger.abort(
+                 "Cannot canonicalize address `{}={}`.",
+                 cf.bold("--address"),
+                 cf.bold(address),
+             )
+             raise Exception(f"Cannot canonicalize address `--address={address}`.")
+
+         ray_params.gcs_address = bootstrap_address
+
+         # Get the node IP address if one is not provided.
+         ray_params.update_if_absent(
+             node_ip_address=services.get_node_ip_address(bootstrap_address)
+         )
+
+         cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
+
+         node = ray._private.node.Node(
+             ray_params, head=False, shutdown_at_exit=block, spawn_reaper=block
+         )
+         temp_dir = node.get_temp_dir_path()
+
+         # TODO(hjiang): Validate whether the specified resource is true for the
+         # physical resource.
+
+         # Ray and Python versions should probably be checked before
+         # initializing Node.
+         node.check_version_info()
+
+         cli_logger.newline()
+         startup_msg = "Ray runtime started."
+         cli_logger.success("-" * len(startup_msg))
+         cli_logger.success(startup_msg)
+         cli_logger.success("-" * len(startup_msg))
+         cli_logger.newline()
+         cli_logger.print("To terminate the Ray runtime, run")
+         cli_logger.print(cf.bold(" ray stop"))
+         cli_logger.flush()
+
+     assert ray_params.gcs_address is not None
+     ray._private.utils.write_ray_address(ray_params.gcs_address, temp_dir)
+
+     if block:
+         cli_logger.newline()
+         with cli_logger.group(cf.bold("--block")):
+             cli_logger.print(
+                 "This command will now block forever until terminated by a signal."
+             )
+             cli_logger.print(
+                 "Running subprocesses are monitored and a message will be "
+                 "printed if any of them terminate unexpectedly. Subprocesses "
+                 "that exit with SIGTERM are treated as graceful and thus NOT reported."
+             )
+             cli_logger.flush()
+
+         while True:
+             time.sleep(1)
+             deceased = node.dead_processes()
+
+             # Report unexpected exits of subprocesses with unexpected return codes.
+             # We are explicitly expecting SIGTERM because this is how `ray stop` sends
+             # the shutdown signal to subprocesses, i.e. log_monitor, raylet...
+             # NOTE(rickyyx): We are treating 128+15 as an expected return code since
+             # this is what autoscaler/_private/monitor.py does upon SIGTERM
+             # handling.
+             expected_return_codes = [
+                 0,
+                 signal.SIGTERM,
+                 -1 * signal.SIGTERM,
+                 128 + signal.SIGTERM,
+             ]
+             unexpected_deceased = [
+                 (process_type, process)
+                 for process_type, process in deceased
+                 if process.returncode not in expected_return_codes
+             ]
+             if len(unexpected_deceased) > 0:
+                 cli_logger.newline()
+                 cli_logger.error("Some Ray subprocesses exited unexpectedly:")
+
+                 with cli_logger.indented():
+                     for process_type, process in unexpected_deceased:
+                         cli_logger.error(
+                             "{}",
+                             cf.bold(str(process_type)),
+                             _tags={"exit code": str(process.returncode)},
+                         )
+
+                 cli_logger.newline()
+                 cli_logger.error("Remaining processes will be killed.")
+                 # Explicitly kill all processes since atexit handlers
+                 # will not exit with errors.
+                 node.kill_all_processes(check_alive=False, allow_graceful=False)
+                 os._exit(1)
+         # not reachable
+
+
+ @cli.command()
+ @click.option(
+     "-f",
+     "--force",
+     is_flag=True,
+     help="If set, ray will send SIGKILL instead of SIGTERM.",
+ )
+ @click.option(
+     "-g",
+     "--grace-period",
+     default=16,
+     help=(
+         "The time in seconds ray waits for processes to be properly terminated. "
+         "If processes are not terminated within the grace period, "
+         "they are forcefully terminated after the grace period. "
+     ),
+ )
+ @add_click_logging_options
+ @PublicAPI
+ def stop(force: bool, grace_period: int):
+     """Stop Ray processes manually on the local machine."""
+     is_linux = sys.platform.startswith("linux")
+     total_procs_found = 0
+     total_procs_stopped = 0
+     procs_not_gracefully_killed = []
+
+     def kill_procs(
+         force: bool, grace_period: int, processes_to_kill: List[str]
+     ) -> Tuple[int, int, List[psutil.Process]]:
+         """Find all processes from `processes_to_kill` and terminate them.
+
+         Unless `force` is specified, processes are killed gracefully. If
+         they are not cleaned up within `grace_period`, all remaining
+         processes are force-killed.
+
+         Returns:
+             total_procs_found: Total number of processes found that match
+                 `processes_to_kill`.
+             total_procs_stopped: Total number of processes that were
+                 stopped gracefully.
+             procs_not_gracefully_killed: Processes that could not be
+                 killed gracefully within the grace period.
+         """
+         process_infos = []
+         for proc in psutil.process_iter(["name", "cmdline"]):
+             try:
+                 process_infos.append((proc, proc.name(), proc.cmdline()))
+             except psutil.Error:
+                 pass
+
+         stopped = []
+         for keyword, filter_by_cmd in processes_to_kill:
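+             # On Linux, process names (/proc/<pid>/comm) are truncated to 15
+             # characters, so a longer name filter could never match.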
+             if filter_by_cmd and is_linux and len(keyword) > 15:
+                 # Getting here is an internal bug, so we do not use cli_logger.
+                 msg = (
+                     "The filter string should not be more than {} "
+                     "characters. Actual length: {}. Filter: {}"
+                 ).format(15, len(keyword), keyword)
+                 raise ValueError(msg)
+
+             found = []
+             for candidate in process_infos:
+                 proc, proc_cmd, proc_args = candidate
+                 corpus = (
+                     proc_cmd if filter_by_cmd else subprocess.list2cmdline(proc_args)
+                 )
+                 if keyword in corpus:
+                     found.append(candidate)
+             for proc, proc_cmd, proc_args in found:
+                 proc_string = str(subprocess.list2cmdline(proc_args))
+                 try:
+                     if force:
+                         proc.kill()
+                     else:
+                         # TODO(mehrdadn): On Windows, this is forceful termination.
+                         # We don't want CTRL_BREAK_EVENT, because that would
+                         # terminate the entire process group. What to do?
+                         proc.terminate()
+
+                     if force:
+                         cli_logger.verbose(
+                             "Killed `{}` {} ",
+                             cf.bold(proc_string),
+                             cf.dimmed("(via SIGKILL)"),
+                         )
+                     else:
+                         cli_logger.verbose(
+                             "Sent termination request to `{}` {}",
+                             cf.bold(proc_string),
+                             cf.dimmed("(via SIGTERM)"),
+                         )
+
+                     stopped.append(proc)
+                 except psutil.NoSuchProcess:
+                     cli_logger.verbose(
+                         "Attempted to stop `{}`, but the process was already dead.",
+                         cf.bold(proc_string),
+                     )
+                 except (psutil.Error, OSError) as ex:
+                     cli_logger.error(
+                         "Could not terminate `{}` due to {}",
+                         cf.bold(proc_string),
+                         str(ex),
+                     )
+
+         # Wait for the processes to actually stop.
+         # Dedup processes.
+         stopped, alive = psutil.wait_procs(stopped, timeout=0)
+         procs_to_kill = stopped + alive
+         total_found = len(procs_to_kill)
+
+         # Wait for the grace period to terminate processes.
+         gone_procs = set()
+
+         def on_terminate(proc):
+             gone_procs.add(proc)
+             cli_logger.print(f"{len(gone_procs)}/{total_found} stopped.", end="\r")
+
+         stopped, alive = psutil.wait_procs(
+             procs_to_kill, timeout=grace_period, callback=on_terminate
+         )
+         total_stopped = len(stopped)
+
+         # For processes that are not killed within the grace period,
+         # we send force termination signals.
+         for proc in alive:
+             proc.kill()
+         # Wait a little bit to make sure processes are killed forcefully.
+         psutil.wait_procs(alive, timeout=2)
+         return total_found, total_stopped, alive
+
+     # Process killing procedure: we put processes into 3 buckets.
+     # Bucket 1: raylet
+     # Bucket 2: all other processes, e.g. dashboard, runtime env agents
+     # Bucket 3: gcs_server.
+     #
+     # For each bucket, we send SIGTERM to all processes, wait for this
+     # bucket's share of the grace period, and then SIGKILL any that are
+     # still alive.
+     processes_to_kill = RAY_PROCESSES
+     # The raylet should exit before all other processes exit.
+     # Otherwise, fate-sharing agents will complain and exit.
+     assert processes_to_kill[0][0] == "raylet"
+
+     # The GCS should exit after all other processes exit.
+     # Otherwise, some processes may exit with an unexpected
+     # exit code, which breaks `ray start --block`.
+     assert processes_to_kill[-1][0] == "gcs_server"
+
+     buckets = [[processes_to_kill[0]], processes_to_kill[1:-1], [processes_to_kill[-1]]]
+
+     for bucket in buckets:
+         found, stopped, alive = kill_procs(force, grace_period / len(buckets), bucket)
+         total_procs_found += found
+         total_procs_stopped += stopped
+         procs_not_gracefully_killed.extend(alive)
+
+     # Print the termination result.
+     if total_procs_found == 0:
+         cli_logger.print("Did not find any active Ray processes.")
+     else:
+         if total_procs_stopped == total_procs_found:
+             cli_logger.success("Stopped all {} Ray processes.", total_procs_stopped)
+         else:
+             cli_logger.warning(
+                 f"Stopped only {total_procs_stopped} out of {total_procs_found} "
+                 f"Ray processes within the grace period of {grace_period} seconds. "
+                 f"Set `{cf.bold('-v')}` to see more details. "
+                 f"Remaining processes {procs_not_gracefully_killed} "
+                 "will be forcefully terminated.",
+             )
+             cli_logger.warning(
+                 f"You can also use `{cf.bold('--force')}` to forcefully terminate "
+                 "processes, or set a higher `--grace-period` to wait longer for "
+                 "proper termination."
+             )
+
+     # NOTE(swang): This will not reset the cluster address for a user-defined
+     # temp_dir. This is fine since it will get overwritten the next time we
+     # call `ray start`.
+     ray._private.utils.reset_ray_address()
+
+
+ @cli.command()
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.option(
+     "--min-workers",
+     required=False,
+     type=int,
+     help="Override the configured min worker node count for the cluster.",
+ )
+ @click.option(
+     "--max-workers",
+     required=False,
+     type=int,
+     help="Override the configured max worker node count for the cluster.",
+ )
+ @click.option(
+     "--no-restart",
+     is_flag=True,
+     default=False,
+     help=(
+         "Whether to skip restarting Ray services during the update. "
+         "This avoids interrupting running jobs."
+     ),
+ )
+ @click.option(
+     "--restart-only",
+     is_flag=True,
+     default=False,
+     help=(
+         "Whether to skip running setup commands and only restart Ray. "
+         "This cannot be used with 'no-restart'."
+     ),
+ )
+ @click.option(
+     "--yes", "-y", is_flag=True, default=False, help="Don't ask for confirmation."
+ )
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ @click.option(
+     "--no-config-cache",
+     is_flag=True,
+     default=False,
+     help="Disable the local cluster config cache.",
+ )
+ @click.option(
+     "--redirect-command-output",
+     is_flag=True,
+     default=False,
+     help="Whether to redirect command output to a file.",
+ )
+ @click.option(
+     "--use-login-shells/--use-normal-shells",
+     is_flag=True,
+     default=True,
+     help=(
+         "Ray uses login shells (bash --login -i) to run cluster commands "
+         "by default. If your workflow is compatible with normal shells, "
+         "this can be disabled for a better user experience."
+     ),
+ )
+ @click.option(
+     "--disable-usage-stats",
+     is_flag=True,
+     default=False,
+     help="If True, the usage stats collection will be disabled.",
+ )
+ @add_click_logging_options
+ @PublicAPI
+ def up(
+     cluster_config_file,
+     min_workers,
+     max_workers,
+     no_restart,
+     restart_only,
+     yes,
+     cluster_name,
+     no_config_cache,
+     redirect_command_output,
+     use_login_shells,
+     disable_usage_stats,
+ ):
+     """Create or update a Ray cluster."""
+     if disable_usage_stats:
+         usage_lib.set_usage_stats_enabled_via_env_var(False)
+
+     if restart_only or no_restart:
+         cli_logger.doassert(
+             restart_only != no_restart,
+             "`{}` is incompatible with `{}`.",
+             cf.bold("--restart-only"),
+             cf.bold("--no-restart"),
+         )
+         assert (
+             restart_only != no_restart
+         ), "Cannot set both 'restart_only' and 'no_restart' at the same time!"
+
+     if urllib.parse.urlparse(cluster_config_file).scheme in ("http", "https"):
+         try:
+             response = urllib.request.urlopen(cluster_config_file, timeout=5)
+             content = response.read()
+             file_name = cluster_config_file.split("/")[-1]
+             with open(file_name, "wb") as f:
+                 f.write(content)
+             cluster_config_file = file_name
+         except urllib.error.HTTPError as e:
+             cli_logger.warning("{}", str(e))
+             cli_logger.warning("Could not download remote cluster configuration file.")
+     create_or_update_cluster(
+         config_file=cluster_config_file,
+         override_min_workers=min_workers,
+         override_max_workers=max_workers,
+         no_restart=no_restart,
+         restart_only=restart_only,
+         yes=yes,
+         override_cluster_name=cluster_name,
+         no_config_cache=no_config_cache,
+         redirect_command_output=redirect_command_output,
+         use_login_shells=use_login_shells,
+     )
+
+
+ @cli.command()
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.option(
+     "--yes", "-y", is_flag=True, default=False, help="Don't ask for confirmation."
+ )
+ @click.option(
+     "--workers-only", is_flag=True, default=False, help="Only destroy the workers."
+ )
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ @click.option(
+     "--keep-min-workers",
+     is_flag=True,
+     default=False,
+     help="Retain the minimum number of workers specified in the config.",
+ )
+ @add_click_logging_options
+ @PublicAPI
+ def down(cluster_config_file, yes, workers_only, cluster_name, keep_min_workers):
+     """Tear down a Ray cluster."""
+     teardown_cluster(
+         cluster_config_file, yes, workers_only, cluster_name, keep_min_workers
+     )
+
+
+ @cli.command(hidden=True)
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.option(
+     "--yes", "-y", is_flag=True, default=False, help="Don't ask for confirmation."
+ )
+ @click.option(
+     "--hard",
+     is_flag=True,
+     default=False,
+     help="Terminates the node via the node provider (defaults to a 'soft kill'"
+     " which terminates Ray but does not actually delete the instances).",
+ )
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ def kill_random_node(cluster_config_file, yes, hard, cluster_name):
+     """Kills a random Ray node. For testing purposes only."""
+     click.echo(
+         "Killed node with IP " + kill_node(cluster_config_file, yes, hard, cluster_name)
+     )
+
+
+ @cli.command()
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.option(
+     "--lines", required=False, default=100, type=int, help="Number of lines to tail."
+ )
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ @add_click_logging_options
+ def monitor(cluster_config_file, lines, cluster_name):
+     """Tails the autoscaler logs of a Ray cluster."""
+     monitor_cluster(cluster_config_file, lines, cluster_name)
+
+
+ @cli.command()
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.option(
+     "--start", is_flag=True, default=False, help="Start the cluster if needed."
+ )
+ @click.option(
+     "--screen", is_flag=True, default=False, help="Run the command in screen."
+ )
+ @click.option("--tmux", is_flag=True, default=False, help="Run the command in tmux.")
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ @click.option(
+     "--no-config-cache",
+     is_flag=True,
+     default=False,
+     help="Disable the local cluster config cache.",
+ )
+ @click.option("--new", "-N", is_flag=True, help="Force creation of a new screen.")
+ @click.option(
+     "--port-forward",
+     "-p",
+     required=False,
+     multiple=True,
+     type=int,
+     help="Port to forward. Use this multiple times to forward multiple ports.",
+ )
+ @add_click_logging_options
+ @PublicAPI
+ def attach(
+     cluster_config_file,
+     start,
+     screen,
+     tmux,
+     cluster_name,
+     no_config_cache,
+     new,
+     port_forward,
+ ):
+     """Create or attach to an SSH session to a Ray cluster."""
+     port_forward = [(port, port) for port in list(port_forward)]
+     attach_cluster(
+         cluster_config_file,
+         start,
+         screen,
+         tmux,
+         cluster_name,
+         no_config_cache=no_config_cache,
+         new=new,
+         port_forward=port_forward,
+     )
+
+
+ @cli.command()
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.argument("source", required=False, type=str)
+ @click.argument("target", required=False, type=str)
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ @add_click_logging_options
+ def rsync_down(cluster_config_file, source, target, cluster_name):
+     """Download specific files from a Ray cluster."""
+     rsync(cluster_config_file, source, target, cluster_name, down=True)
+
+
+ @cli.command()
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.argument("source", required=False, type=str)
+ @click.argument("target", required=False, type=str)
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ @click.option(
+     "--all-nodes",
+     "-A",
+     is_flag=True,
+     required=False,
+     help="Upload to all nodes (workers and head).",
+ )
+ @add_click_logging_options
+ def rsync_up(cluster_config_file, source, target, cluster_name, all_nodes):
+     """Upload specific files to a Ray cluster."""
+     if all_nodes:
+         cli_logger.warning(
+             "WARNING: the `all_nodes` option is deprecated and will be "
+             "removed in the future. "
+             "Rsync to worker nodes is not reliable since workers may be "
+             "added during autoscaling. Please use the `file_mounts` "
+             "feature instead for consistent file sync in autoscaling clusters."
+         )
+
+     rsync(
+         cluster_config_file,
+         source,
+         target,
+         cluster_name,
+         down=False,
+         all_nodes=all_nodes,
+     )
+
+
+ @cli.command(context_settings={"ignore_unknown_options": True})
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.option(
+     "--stop",
+     is_flag=True,
+     default=False,
+     help="Stop the cluster after the command finishes running.",
+ )
+ @click.option(
+     "--start", is_flag=True, default=False, help="Start the cluster if needed."
+ )
+ @click.option(
+     "--screen", is_flag=True, default=False, help="Run the command in a screen."
+ )
+ @click.option("--tmux", is_flag=True, default=False, help="Run the command in tmux.")
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ @click.option(
+     "--no-config-cache",
+     is_flag=True,
+     default=False,
+     help="Disable the local cluster config cache.",
+ )
+ @click.option(
+     "--port-forward",
+     "-p",
+     required=False,
+     multiple=True,
+     type=int,
+     help="Port to forward. Use this multiple times to forward multiple ports.",
+ )
+ @click.argument("script", required=True, type=str)
+ @click.option(
+     "--args",
+     required=False,
+     type=str,
+     help="(deprecated) Use '-- --arg1 --arg2' for script args.",
+ )
+ @click.argument("script_args", nargs=-1)
+ @click.option(
+     "--disable-usage-stats",
+     is_flag=True,
+     default=False,
+     help="If True, the usage stats collection will be disabled.",
+ )
+ @click.option(
+     "--extra-screen-args",
+     default=None,
+     help="If screen is enabled, add the provided args to it. For example, "
+     "passing --extra-screen-args='-Logfile /full/path/blah_log.txt' "
+     "also redirects screen output to a custom file.",
+ )
+ @add_click_logging_options
+ def submit(
+     cluster_config_file,
+     screen,
+     tmux,
+     stop,
+     start,
+     cluster_name,
+     no_config_cache,
+     port_forward,
+     script,
+     args,
+     script_args,
+     disable_usage_stats,
+     extra_screen_args: Optional[str] = None,
+ ):
+     """Uploads and runs a script on the specified cluster.
+
+     The script is automatically synced to the following location:
+
+         os.path.join("~", os.path.basename(script))
+
+     Example:
+         ray submit [CLUSTER.YAML] experiment.py -- --smoke-test
+     """
+     cli_logger.doassert(
+         not (screen and tmux),
+         "`{}` and `{}` are incompatible.",
+         cf.bold("--screen"),
+         cf.bold("--tmux"),
+     )
+     cli_logger.doassert(
+         not (script_args and args),
+         "`{0}` and `{1}` are incompatible. Use only `{1}`.\nExample: `{2}`",
+         cf.bold("--args"),
+         cf.bold("-- <args ...>"),
+         cf.bold("ray submit script.py -- --arg=123 --flag"),
+     )
+
+     assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
+     assert not (script_args and args), "Use -- --arg1 --arg2 for script args."
+
+     if (extra_screen_args is not None) and (not screen):
+         cli_logger.abort(
+             "The `--screen` flag is required to use `--extra-screen-args`."
+         )
+
+     if args:
+         cli_logger.warning(
+             "`{}` is deprecated and will be removed in the future.", cf.bold("--args")
+         )
+         cli_logger.warning(
+             "Use `{}` instead. Example: `{}`.",
+             cf.bold("-- <args ...>"),
+             cf.bold("ray submit script.py -- --arg=123 --flag"),
+         )
+         cli_logger.newline()
+
+     if start:
+         if disable_usage_stats:
+             usage_lib.set_usage_stats_enabled_via_env_var(False)
+
+         create_or_update_cluster(
+             config_file=cluster_config_file,
+             override_min_workers=None,
+             override_max_workers=None,
+             no_restart=False,
+             restart_only=False,
+             yes=True,
+             override_cluster_name=cluster_name,
+             no_config_cache=no_config_cache,
+             redirect_command_output=False,
+             use_login_shells=True,
+         )
+     target = os.path.basename(script)
+     target = os.path.join("~", target)
+     rsync(
+         cluster_config_file,
+         script,
+         target,
+         cluster_name,
+         no_config_cache=no_config_cache,
+         down=False,
+     )
+
+     command_parts = ["python", target]
+     if script_args:
+         command_parts += list(script_args)
+     elif args is not None:
+         command_parts += [args]
+
+     port_forward = [(port, port) for port in list(port_forward)]
+     cmd = " ".join(command_parts)
+     exec_cluster(
+         cluster_config_file,
+         cmd=cmd,
+         run_env="docker",
+         screen=screen,
+         tmux=tmux,
+         stop=stop,
+         start=False,
+         override_cluster_name=cluster_name,
+         no_config_cache=no_config_cache,
+         port_forward=port_forward,
+         extra_screen_args=extra_screen_args,
+     )
+
+
+ @cli.command()
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.argument("cmd", required=True, type=str)
+ @click.option(
+     "--run-env",
+     required=False,
+     type=click.Choice(RUN_ENV_TYPES),
+     default="auto",
+     help="Choose whether to execute this command in a container or directly on"
+     " the cluster head. Only applies when docker is configured in the YAML.",
+ )
+ @click.option(
+     "--stop",
+     is_flag=True,
+     default=False,
+     help="Stop the cluster after the command finishes running.",
+ )
+ @click.option(
+     "--start", is_flag=True, default=False, help="Start the cluster if needed."
+ )
+ @click.option(
+     "--screen", is_flag=True, default=False, help="Run the command in a screen."
+ )
+ @click.option("--tmux", is_flag=True, default=False, help="Run the command in tmux.")
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ @click.option(
+     "--no-config-cache",
+     is_flag=True,
+     default=False,
+     help="Disable the local cluster config cache.",
+ )
+ @click.option(
+     "--port-forward",
+     "-p",
+     required=False,
+     multiple=True,
+     type=int,
+     help="Port to forward. Use this multiple times to forward multiple ports.",
+ )
+ @click.option(
+     "--disable-usage-stats",
+     is_flag=True,
+     default=False,
+     help="If True, the usage stats collection will be disabled.",
+ )
+ @add_click_logging_options
+ def exec(
+     cluster_config_file,
+     cmd,
+     run_env,
+     screen,
+     tmux,
+     stop,
+     start,
+     cluster_name,
+     no_config_cache,
+     port_forward,
+     disable_usage_stats,
+ ):
+     """Execute a command via SSH on a Ray cluster."""
+     port_forward = [(port, port) for port in list(port_forward)]
+
+     if start:
+         if disable_usage_stats:
+             usage_lib.set_usage_stats_enabled_via_env_var(False)
+
+     exec_cluster(
+         cluster_config_file,
+         cmd=cmd,
+         run_env=run_env,
+         screen=screen,
+         tmux=tmux,
+         stop=stop,
+         start=start,
+         override_cluster_name=cluster_name,
+         no_config_cache=no_config_cache,
+         port_forward=port_forward,
+         _allow_uninitialized_state=True,
+     )
+
+
+ @cli.command()
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ def get_head_ip(cluster_config_file, cluster_name):
+     """Return the head node IP of a Ray cluster."""
+     click.echo(get_head_node_ip(cluster_config_file, cluster_name))
+
+
+ @cli.command()
+ @click.argument("cluster_config_file", required=True, type=str)
+ @click.option(
+     "--cluster-name",
+     "-n",
+     required=False,
+     type=str,
+     help="Override the configured cluster name.",
+ )
+ def get_worker_ips(cluster_config_file, cluster_name):
+     """Return the list of worker IPs of a Ray cluster."""
+     worker_ips = get_worker_node_ips(cluster_config_file, cluster_name)
+     click.echo("\n".join(worker_ips))
+
+
+ @cli.command()
1906
+ def disable_usage_stats():
1907
+ """Disable usage stats collection.
1908
+
1909
+ This does not affect currently running clusters,
1910
+ only clusters launched in the future.
1911
+ """
1912
+ usage_lib.set_usage_stats_enabled_via_config(enabled=False)
1913
+ print(
1914
+ "Usage stats disabled for future clusters. "
1915
+ "Restart any current running clusters for this to take effect."
1916
+ )
1917
+
1918
+
1919
+ @cli.command()
1920
+ def enable_usage_stats():
1921
+ """Enable usage stats collection.
1922
+
1923
+ This will not affect the current running clusters
1924
+ only clusters launched in the future.
1925
+ """
1926
+ usage_lib.set_usage_stats_enabled_via_config(enabled=True)
1927
+ print(
1928
+ "Usage stats enabled for future clusters. "
1929
+ "Restart any current running clusters for this to take effect."
1930
+ )
1931
+
1932
+
1933
+ @cli.command()
1934
+ def stack():
1935
+ """Take a stack dump of all Python workers on the local machine."""
1936
+ COMMAND = """
1937
+ pyspy=`which py-spy`
1938
+ if [ ! -e "$pyspy" ]; then
1939
+ echo "ERROR: Please 'pip install py-spy'" \
1940
+ "or 'pip install ray[default]' first."
1941
+ exit 1
1942
+ fi
1943
+ # Set IFS to iterate over lines instead of over words.
1944
+ export IFS="
1945
+ "
1946
+ # Call sudo to prompt for password before anything has been printed.
1947
+ sudo true
1948
+ workers=$(
1949
+ ps aux | grep -E ' ray::|default_worker.py' | grep -v raylet | grep -v grep
1950
+ )
1951
+ for worker in $workers; do
1952
+ echo "Stack dump for $worker";
1953
+ pid=`echo $worker | awk '{print $2}'`;
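+ # Pass py-spy's --native flag only on Linux, where the code enables it.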
1954
+ case "$(uname -s)" in
1955
+ Linux*) native=--native;;
1956
+ *) native=;;
1957
+ esac
1958
+ sudo $pyspy dump --pid $pid $native;
1959
+ echo;
1960
+ done
1961
+ """
1962
+ subprocess.call(COMMAND, shell=True)
1963
+
1964
+
1965
+ @cli.command()
1966
+ def microbenchmark():
1967
+ """Run a local Ray microbenchmark on the current machine."""
1968
+ from ray._private.ray_perf import main
1969
+
1970
+ main()
1971
+
1972
+
1973
+ @cli.command()
1974
+ @click.option(
1975
+ "--address",
1976
+ required=False,
1977
+ type=str,
1978
+ help="Override the Ray address to connect to.",
1979
+ )
1980
+ def timeline(address):
1981
+ """Take a Chrome tracing timeline for a Ray cluster."""
1982
+ address = services.canonicalize_bootstrap_address_or_die(address)
1983
+ logger.info(f"Connecting to Ray instance at {address}.")
1984
+ ray.init(address=address)
1985
+ time = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
1986
+ filename = os.path.join(
1987
+ ray._private.utils.get_user_temp_dir(), f"ray-timeline-{time}.json"
1988
+ )
1989
+ ray.timeline(filename=filename)
1990
+ size = os.path.getsize(filename)
1991
+ logger.info(f"Trace file written to {filename} ({size} bytes).")
1992
+ logger.info("You can open this with chrome://tracing in the Chrome browser.")
1993
+
1994
+
1995
+ @cli.command()
1996
+ @click.option(
1997
+ "--address", required=False, type=str, help="Override the address to connect to."
1998
+ )
1999
+ @click.option(
2000
+ "--group-by",
2001
+ type=click.Choice(["NODE_ADDRESS", "STACK_TRACE"]),
2002
+ default="NODE_ADDRESS",
2003
+ help="Group object references by a GroupByType \
2004
+ (e.g. NODE_ADDRESS or STACK_TRACE).",
2005
+ )
2006
+ @click.option(
2007
+ "--sort-by",
2008
+ type=click.Choice(["PID", "OBJECT_SIZE", "REFERENCE_TYPE"]),
2009
+ default="OBJECT_SIZE",
2010
+ help="Sort object references in ascending order by a SortingType \
2011
+ (e.g. PID, OBJECT_SIZE, or REFERENCE_TYPE).",
2012
+ )
2013
+ @click.option(
2014
+ "--units",
2015
+ type=click.Choice(["B", "KB", "MB", "GB"]),
2016
+ default="B",
2017
+ help="Specify unit metrics for displaying object sizes \
2018
+ (e.g. B, KB, MB, GB).",
2019
+ )
2020
+ @click.option(
2021
+ "--no-format",
2022
+ is_flag=True,
2023
+ type=bool,
2024
+ default=True,
2025
+ help="Display unformatted results. Defaults to true when \
2026
+ terminal width is less than 137 characters.",
2027
+ )
2028
+ @click.option(
2029
+ "--stats-only", is_flag=True, default=False, help="Display plasma store stats only."
2030
+ )
2031
+ @click.option(
2032
+ "--num-entries",
2033
+ "--n",
2034
+ type=int,
2035
+ default=None,
2036
+ help="Specify number of sorted entries per group.",
2037
+ )
2038
+ def memory(
2039
+ address,
2040
+ group_by,
2041
+ sort_by,
2042
+ units,
2043
+ no_format,
2044
+ stats_only,
2045
+ num_entries,
2046
+ ):
2047
+ """Print object references held in a Ray cluster."""
2048
+ address = services.canonicalize_bootstrap_address_or_die(address)
2049
+ gcs_client = ray._raylet.GcsClient(address=address)
2050
+ _check_ray_version(gcs_client)
2051
+ time = datetime.now()
2052
+ header = "=" * 8 + f" Object references status: {time} " + "=" * 8
2053
+ mem_stats = memory_summary(
2054
+ address,
2055
+ group_by,
2056
+ sort_by,
2057
+ units,
2058
+ no_format,
2059
+ stats_only,
2060
+ num_entries,
2061
+ )
2062
+ print(f"{header}\n{mem_stats}")
2063
+
2064
+
2065
+ @cli.command()
2066
+ @click.option(
2067
+ "--address", required=False, type=str, help="Override the address to connect to."
2068
+ )
2069
+ @click.option(
2070
+ "-v",
2071
+ "--verbose",
2072
+ required=False,
2073
+ is_flag=True,
2074
+ hidden=True,
2075
+ help="Experimental: Display additional debuggging information.",
2076
+ )
2077
+ @PublicAPI
2078
+ def status(address: str, verbose: bool):
2079
+ """Print cluster status, including autoscaling info."""
2080
+ address = services.canonicalize_bootstrap_address_or_die(address)
2081
+ gcs_client = ray._raylet.GcsClient(address=address)
2082
+ _check_ray_version(gcs_client)
2083
+ ray.experimental.internal_kv._initialize_internal_kv(gcs_client)
2084
+ status = gcs_client.internal_kv_get(ray_constants.DEBUG_AUTOSCALING_STATUS.encode())
2085
+ error = gcs_client.internal_kv_get(ray_constants.DEBUG_AUTOSCALING_ERROR.encode())
2086
+ print(debug_status(status, error, verbose=verbose, address=address))
2087
+
2088
+
2089
+ @cli.command(hidden=True)
2090
+ @click.option(
2091
+ "--stream",
2092
+ "-S",
2093
+ required=False,
2094
+ type=bool,
2095
+ is_flag=True,
2096
+ default=False,
2097
+ help="If True, will stream the binary archive contents to stdout",
2098
+ )
2099
+ @click.option(
2100
+ "--output", "-o", required=False, type=str, default=None, help="Output file."
2101
+ )
2102
+ @click.option(
2103
+ "--logs/--no-logs",
2104
+ is_flag=True,
2105
+ default=True,
2106
+ help="Collect logs from ray session dir",
2107
+ )
2108
+ @click.option(
2109
+ "--debug-state/--no-debug-state",
2110
+ is_flag=True,
2111
+ default=True,
2112
+ help="Collect debug_state.txt from ray session dir",
2113
+ )
2114
+ @click.option(
2115
+ "--pip/--no-pip", is_flag=True, default=True, help="Collect installed pip packages"
2116
+ )
2117
+ @click.option(
2118
+ "--processes/--no-processes",
2119
+ is_flag=True,
2120
+ default=True,
2121
+ help="Collect info on running processes",
2122
+ )
2123
+ @click.option(
2124
+ "--processes-verbose/--no-processes-verbose",
2125
+ is_flag=True,
2126
+ default=True,
2127
+ help="Increase process information verbosity",
2128
+ )
2129
+ @click.option(
2130
+ "--tempfile",
2131
+ "-T",
2132
+ required=False,
2133
+ type=str,
2134
+ default=None,
2135
+ help="Temporary file to use",
2136
+ )
2137
+ def local_dump(
2138
+ stream: bool = False,
2139
+ output: Optional[str] = None,
2140
+ logs: bool = True,
2141
+ debug_state: bool = True,
2142
+ pip: bool = True,
2143
+ processes: bool = True,
2144
+ processes_verbose: bool = False,
2145
+ tempfile: Optional[str] = None,
2146
+ ):
2147
+ """Collect local data and package into an archive.
2148
+
2149
+ Usage:
2150
+
2151
+ ray local-dump [--stream/--output file]
2152
+
2153
+ This script is called on remote nodes to fetch their data.
2154
+ """
2155
+ # This may stream data to stdout, so no printing here
2156
+ get_local_dump_archive(
2157
+ stream=stream,
2158
+ output=output,
2159
+ logs=logs,
2160
+ debug_state=debug_state,
2161
+ pip=pip,
2162
+ processes=processes,
2163
+ processes_verbose=processes_verbose,
2164
+ tempfile=tempfile,
2165
+ )
2166
+
2167
+
2168
+ @cli.command()
2169
+ @click.argument("cluster_config_file", required=False, type=str)
2170
+ @click.option(
2171
+ "--host",
2172
+ "-h",
2173
+ required=False,
2174
+ type=str,
2175
+ help="Single or list of hosts, separated by comma.",
2176
+ )
2177
+ @click.option(
2178
+ "--ssh-user",
2179
+ "-U",
2180
+ required=False,
2181
+ type=str,
2182
+ default=None,
2183
+ help="Username of the SSH user.",
2184
+ )
2185
+ @click.option(
2186
+ "--ssh-key",
2187
+ "-K",
2188
+ required=False,
2189
+ type=str,
2190
+ default=None,
2191
+ help="Path to the SSH key file.",
2192
+ )
2193
+ @click.option(
2194
+ "--docker",
2195
+ "-d",
2196
+ required=False,
2197
+ type=str,
2198
+ default=None,
2199
+ help="Name of the docker container, if applicable.",
2200
+ )
2201
+ @click.option(
2202
+ "--local",
2203
+ "-L",
2204
+ required=False,
2205
+ type=bool,
2206
+ is_flag=True,
2207
+ default=None,
2208
+ help="Also include information about the local node.",
2209
+ )
2210
+ @click.option(
2211
+ "--output", "-o", required=False, type=str, default=None, help="Output file."
2212
+ )
2213
+ @click.option(
2214
+ "--logs/--no-logs",
2215
+ is_flag=True,
2216
+ default=True,
2217
+ help="Collect logs from ray session dir",
2218
+ )
2219
+ @click.option(
2220
+ "--debug-state/--no-debug-state",
2221
+ is_flag=True,
2222
+ default=True,
2223
+ help="Collect debug_state.txt from ray log dir",
2224
+ )
2225
+ @click.option(
2226
+ "--pip/--no-pip", is_flag=True, default=True, help="Collect installed pip packages"
2227
+ )
2228
+ @click.option(
2229
+ "--processes/--no-processes",
2230
+ is_flag=True,
2231
+ default=True,
2232
+ help="Collect info on running processes",
2233
+ )
2234
+ @click.option(
2235
+ "--processes-verbose/--no-processes-verbose",
2236
+ is_flag=True,
2237
+ default=True,
2238
+ help="Increase process information verbosity",
2239
+ )
2240
+ @click.option(
2241
+ "--tempfile",
2242
+ "-T",
2243
+ required=False,
2244
+ type=str,
2245
+ default=None,
2246
+ help="Temporary file to use",
2247
+ )
2248
+ def cluster_dump(
2249
+ cluster_config_file: Optional[str] = None,
2250
+ host: Optional[str] = None,
2251
+ ssh_user: Optional[str] = None,
2252
+ ssh_key: Optional[str] = None,
2253
+ docker: Optional[str] = None,
2254
+ local: Optional[bool] = None,
2255
+ output: Optional[str] = None,
2256
+ logs: bool = True,
2257
+ debug_state: bool = True,
2258
+ pip: bool = True,
2259
+ processes: bool = True,
2260
+ processes_verbose: bool = False,
2261
+ tempfile: Optional[str] = None,
2262
+ ):
2263
+ """Get log data from one or more nodes.
2264
+
2265
+ Best used with Ray cluster configs:
2266
+
2267
+ ray cluster-dump [cluster.yaml]
2268
+
2269
+ Include the --local flag to also collect and include data from the
2270
+ local node.
2271
+
2272
+ Missing fields will be auto-filled where possible.
2273
+
2274
+ You can also manually specify a list of hosts using the
2275
+ ``--host <host1,host2,...>`` parameter.
2276
+ """
2277
+ archive_path = get_cluster_dump_archive(
2278
+ cluster_config_file=cluster_config_file,
2279
+ host=host,
2280
+ ssh_user=ssh_user,
2281
+ ssh_key=ssh_key,
2282
+ docker=docker,
2283
+ local=local,
2284
+ output=output,
2285
+ logs=logs,
2286
+ debug_state=debug_state,
2287
+ pip=pip,
2288
+ processes=processes,
2289
+ processes_verbose=processes_verbose,
2290
+ tempfile=tempfile,
2291
+ )
2292
+ if archive_path:
2293
+ click.echo(f"Created archive: {archive_path}")
2294
+ else:
2295
+ click.echo("Could not create archive.")
2296
+
2297
+
2298
+ @cli.command(hidden=True)
2299
+ @click.option(
2300
+ "--address", required=False, type=str, help="Override the address to connect to."
2301
+ )
2302
+ def global_gc(address):
2303
+ """Trigger Python garbage collection on all cluster workers."""
2304
+ ray.init(address=address)
2305
+ ray._private.internal_api.global_gc()
2306
+ print("Triggered gc.collect() on all workers.")
2307
+
2308
+
2309
+ @cli.command(hidden=True)
2310
+ @click.option(
2311
+ "--address", required=False, type=str, help="Override the address to connect to."
2312
+ )
2313
+ @click.option(
2314
+ "--node-id",
2315
+ required=True,
2316
+ type=str,
2317
+ help="Hex ID of the worker node to be drained.",
2318
+ )
2319
+ @click.option(
2320
+ "--reason",
2321
+ required=True,
2322
+ type=click.Choice(
2323
+ [
2324
+ item[0]
2325
+ for item in autoscaler_pb2.DrainNodeReason.items()
2326
+ if item[1] != autoscaler_pb2.DRAIN_NODE_REASON_UNSPECIFIED
2327
+ ]
2328
+ ),
2329
+ help="The reason why the node will be drained.",
2330
+ )
2331
+ @click.option(
2332
+ "--reason-message",
2333
+ required=True,
2334
+ type=str,
2335
+ help="The detailed drain reason message.",
2336
+ )
2337
+ @click.option(
2338
+ "--deadline-remaining-seconds",
2339
+ required=False,
2340
+ type=int,
2341
+ default=None,
2342
+ help="Inform GCS that the node to be drained will be force killed "
2343
+ "after this many of seconds. "
2344
+ "Default is None which means there is no deadline. "
2345
+ "Note: This command doesn't actually force kill the node after the deadline, "
2346
+ "it's the caller's responsibility to do that.",
2347
+ )
2348
+ def drain_node(
2349
+ address: str,
2350
+ node_id: str,
2351
+ reason: str,
2352
+ reason_message: str,
2353
+ deadline_remaining_seconds: int,
2354
+ ):
2355
+ """
2356
+ This is NOT a public API.
2357
+
2358
+ Manually drain a worker node.
2359
+ """
2360
+ deadline_timestamp_ms = 0
2361
+ if deadline_remaining_seconds is not None:
2362
+ if deadline_remaining_seconds < 0:
2363
+ raise click.BadParameter(
2364
+ "--deadline-remaining-seconds cannot be negative, "
2365
+ f"got {deadline_remaining_seconds}"
2366
+ )
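+ # Convert the relative deadline to an absolute epoch timestamp in milliseconds.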
2367
+ deadline_timestamp_ms = (time.time_ns() // 1000000) + (
2368
+ deadline_remaining_seconds * 1000
2369
+ )
2370
+
2371
+ if ray.NodeID.from_hex(node_id) == ray.NodeID.nil():
2372
+ raise click.BadParameter(f"Invalid hex ID of a Ray node, got {node_id}")
2373
+
2374
+ address = services.canonicalize_bootstrap_address_or_die(address)
2375
+
2376
+ gcs_client = ray._raylet.GcsClient(address=address)
2377
+ _check_ray_version(gcs_client)
2378
+ is_accepted, rejection_error_message = gcs_client.drain_node(
2379
+ node_id,
2380
+ autoscaler_pb2.DrainNodeReason.Value(reason),
2381
+ reason_message,
2382
+ deadline_timestamp_ms,
2383
+ )
2384
+
2385
+ if not is_accepted:
2386
+ raise click.ClickException(
2387
+ f"The drain request is not accepted: {rejection_error_message}"
2388
+ )
2389
+
2390
+
2391
+ @cli.command(name="kuberay-autoscaler", hidden=True)
2392
+ @click.option(
2393
+ "--cluster-name",
2394
+ required=True,
2395
+ type=str,
2396
+ help="The name of the Ray Cluster.\n"
2397
+ "Should coincide with the `metadata.name` of the RayCluster CR.",
2398
+ )
2399
+ @click.option(
2400
+ "--cluster-namespace",
2401
+ required=True,
2402
+ type=str,
2403
+ help="The Kubernetes namespace the Ray Cluster lives in.\n"
2404
+ "Should coincide with the `metadata.namespace` of the RayCluster CR.",
2405
+ )
2406
+ def kuberay_autoscaler(cluster_name: str, cluster_namespace: str) -> None:
2407
+ """Runs the autoscaler for a Ray cluster managed by the KubeRay operator.
2408
+
2409
+ `ray kuberay-autoscaler` is meant to be used as an entry point in
2410
+ KubeRay cluster configs.
2411
+ `ray kuberay-autoscaler` is NOT a public CLI.
2412
+ """
2413
+ # Delay import to avoid introducing Ray core dependency on the Python Kubernetes
2414
+ # client.
2415
+ from ray.autoscaler._private.kuberay.run_autoscaler import run_kuberay_autoscaler
2416
+
2417
+ run_kuberay_autoscaler(cluster_name, cluster_namespace)
2418
+
2419
+
2420
+ @cli.command(name="health-check", hidden=True)
2421
+ @click.option(
2422
+ "--address", required=False, type=str, help="Override the address to connect to."
2423
+ )
2424
+ @click.option(
2425
+ "--component",
2426
+ required=False,
2427
+ type=str,
2428
+ help="Health check for a specific component. Currently supports: "
2429
+ "[ray_client_server]",
2430
+ )
2431
+ @click.option(
2432
+ "--skip-version-check",
2433
+ is_flag=True,
2434
+ default=False,
2435
+ help="Skip comparison of GCS version with local Ray version.",
2436
+ )
2437
+ def healthcheck(address, component, skip_version_check):
2438
+ """
2439
+ This is NOT a public API.
2440
+
2441
+ Health check a Ray cluster or a specific component. Exit code 0 means healthy.
2442
+ """
2443
+
2444
+ address = services.canonicalize_bootstrap_address_or_die(address)
2445
+ gcs_client = ray._raylet.GcsClient(address=address)
2446
+ if not skip_version_check:
2447
+ _check_ray_version(gcs_client)
2448
+
2449
+ if not component:
2450
+ sys.exit(0)
2451
+
2452
+ report_str = gcs_client.internal_kv_get(
2453
+ component.encode(), namespace=ray_constants.KV_NAMESPACE_HEALTHCHECK
2454
+ )
2455
+ if not report_str:
2456
+ # Status was never updated
2457
+ sys.exit(1)
2458
+
2459
+ report = json.loads(report_str)
2460
+
2461
+ # TODO (Alex): We probably shouldn't rely on time here, but cloud providers
2462
+ # have very well synchronized NTP servers, so this should be fine in
2463
+ # practice.
2464
+ cur_time = time.time()
2465
+ report_time = float(report["time"])
2466
+
2467
+ # If the status is too old, the service has probably already died.
2468
+ delta = cur_time - report_time
2469
+ time_ok = delta < ray._private.ray_constants.HEALTHCHECK_EXPIRATION_S
2470
+
2471
+ if time_ok:
2472
+ sys.exit(0)
2473
+ else:
2474
+ sys.exit(1)
2475
+
2476
+
2477
+ @cli.command()
2478
+ @click.option("-v", "--verbose", is_flag=True)
2479
+ @click.option(
2480
+ "--dryrun",
2481
+ is_flag=True,
2482
+ help="Identifies the wheel but does not execute the installation.",
2483
+ )
2484
+ def install_nightly(verbose, dryrun):
2485
+ """Install the latest wheels for Ray.
2486
+
2487
+ This uses the same Python environment as the one that Ray is currently
2488
+ installed in. Make sure that there are no Ray processes on this
2489
+ machine (ray stop) when running this command.
2490
+ """
2491
+ raydir = os.path.abspath(os.path.dirname(ray.__file__))
2492
+ all_wheels_path = os.path.join(raydir, "nightly-wheels.yaml")
2493
+
2494
+ wheels = None
2495
+ if os.path.exists(all_wheels_path):
2496
+ with open(all_wheels_path) as f:
2497
+ wheels = yaml.safe_load(f)
2498
+
2499
+ if not wheels:
2500
+ raise click.ClickException(
2501
+ f"Wheels not found in '{all_wheels_path}'! "
2502
+ "Please visit https://docs.ray.io/en/master/installation.html to "
2503
+ "obtain the latest wheels."
2504
+ )
2505
+
2506
+ platform = sys.platform
2507
+ py_version = "{0}.{1}".format(*sys.version_info[:2])
2508
+
2509
+ matching_wheel = None
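+ # Pick the first wheel whose OS prefix and Python version match this host.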
2510
+ for target_platform, wheel_map in wheels.items():
2511
+ if verbose:
2512
+ print(f"Evaluating os={target_platform}, python={list(wheel_map)}")
2513
+ if platform.startswith(target_platform):
2514
+ if py_version in wheel_map:
2515
+ matching_wheel = wheel_map[py_version]
2516
+ break
2517
+ if verbose:
2518
+ print("Not matched.")
2519
+
2520
+ if matching_wheel is None:
2521
+ raise click.ClickException(
2522
+ "Unable to identify a matching platform. "
2523
+ "Please visit https://docs.ray.io/en/master/installation.html to "
2524
+ "obtain the latest wheels."
2525
+ )
2526
+ if dryrun:
2527
+ print(f"Found wheel: {matching_wheel}")
2528
+ else:
2529
+ cmd = [sys.executable, "-m", "pip", "install", "-U", matching_wheel]
2530
+ print(f"Running: {' '.join(cmd)}.")
2531
+ subprocess.check_call(cmd)
2532
+
2533
+
2534
+ @cli.command()
2535
+ @click.option(
2536
+ "--show-library-path",
2537
+ "-show",
2538
+ required=False,
2539
+ is_flag=True,
2540
+ help="Show the cpp include path and library path, if provided.",
2541
+ )
2542
+ @click.option(
2543
+ "--generate-bazel-project-template-to",
2544
+ "-gen",
2545
+ required=False,
2546
+ type=str,
2547
+ help="The directory to generate the bazel project template to, if provided.",
2548
+ )
2549
+ @add_click_logging_options
2550
+ def cpp(show_library_path, generate_bazel_project_template_to):
2551
+ """Show the cpp library path and generate the bazel project template."""
2552
+ if sys.platform == "win32":
2553
+ cli_logger.error("Ray C++ API is not supported on Windows currently.")
2554
+ sys.exit(1)
2555
+ if not show_library_path and not generate_bazel_project_template_to:
2556
+ raise ValueError(
2557
+ "Please input at least one option of '--show-library-path'"
2558
+ " and '--generate-bazel-project-template-to'."
2559
+ )
2560
+ raydir = os.path.abspath(os.path.dirname(ray.__file__))
2561
+ cpp_dir = os.path.join(raydir, "cpp")
2562
+ cpp_template_dir = os.path.join(cpp_dir, "example")
2563
+ include_dir = os.path.join(cpp_dir, "include")
2564
+ lib_dir = os.path.join(cpp_dir, "lib")
2565
+ if not os.path.isdir(cpp_dir):
2566
+ raise ValueError('Please install ray with C++ API by "pip install ray[cpp]".')
2567
+ if show_library_path:
2568
+ cli_logger.print("Ray C++ include path {} ", cf.bold(f"{include_dir}"))
2569
+ cli_logger.print("Ray C++ library path {} ", cf.bold(f"{lib_dir}"))
2570
+ if generate_bazel_project_template_to:
2571
+ # copytree expects that the dst dir doesn't exist
2572
+ # so we manually delete it if it exists.
2573
+ if os.path.exists(generate_bazel_project_template_to):
2574
+ shutil.rmtree(generate_bazel_project_template_to)
2575
+ shutil.copytree(cpp_template_dir, generate_bazel_project_template_to)
2576
+ out_include_dir = os.path.join(
2577
+ generate_bazel_project_template_to, "thirdparty/include"
2578
+ )
2579
+ if os.path.exists(out_include_dir):
2580
+ shutil.rmtree(out_include_dir)
2581
+ shutil.copytree(include_dir, out_include_dir)
2582
+ out_lib_dir = os.path.join(generate_bazel_project_template_to, "thirdparty/lib")
2583
+ if os.path.exists(out_lib_dir):
2584
+ shutil.rmtree(out_lib_dir)
2585
+ shutil.copytree(lib_dir, out_lib_dir)
2586
+
2587
+ cli_logger.print(
2588
+ "Project template generated to {}",
2589
+ cf.bold(f"{os.path.abspath(generate_bazel_project_template_to)}"),
2590
+ )
2591
+ cli_logger.print("To build and run this template, run")
2592
+ cli_logger.print(
2593
+ cf.bold(
2594
+ f" cd {os.path.abspath(generate_bazel_project_template_to)}"
2595
+ " && bash run.sh"
2596
+ )
2597
+ )
2598
+
2599
+
2600
+ @click.group(name="metrics")
2601
+ def metrics_group():
2602
+ pass
2603
+
2604
+
2605
+ @metrics_group.command(name="launch-prometheus")
2606
+ def launch_prometheus():
2607
+ install_and_start_prometheus.main()
2608
+
2609
+
2610
+ @metrics_group.command(name="shutdown-prometheus")
2611
+ def shutdown_prometheus():
2612
+ try:
2613
+ requests.post("http://localhost:9090/-/quit")
2614
+ except requests.exceptions.RequestException as e:
2615
+ print(f"An error occurred: {e}")
2616
+ sys.exit(1)
2617
+
2618
+
2619
+ def add_command_alias(command, name, hidden):
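+ # Register a deep copy of an existing click command under an alias name.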
2620
+ new_command = copy.deepcopy(command)
2621
+ new_command.hidden = hidden
2622
+ cli.add_command(new_command, name=name)
2623
+
2624
+
2625
+ cli.add_command(dashboard)
2626
+ cli.add_command(debug)
2627
+ cli.add_command(start)
2628
+ cli.add_command(stop)
2629
+ cli.add_command(up)
2630
+ add_command_alias(up, name="create_or_update", hidden=True)
2631
+ cli.add_command(attach)
2632
+ cli.add_command(exec)
2633
+ add_command_alias(exec, name="exec_cmd", hidden=True)
2634
+ add_command_alias(rsync_down, name="rsync_down", hidden=True)
2635
+ add_command_alias(rsync_up, name="rsync_up", hidden=True)
2636
+ cli.add_command(submit)
2637
+ cli.add_command(down)
2638
+ add_command_alias(down, name="teardown", hidden=True)
2639
+ cli.add_command(kill_random_node)
2640
+ add_command_alias(get_head_ip, name="get_head_ip", hidden=True)
2641
+ cli.add_command(get_worker_ips)
2642
+ cli.add_command(microbenchmark)
2643
+ cli.add_command(stack)
2644
+ cli.add_command(status)
2645
+ cli.add_command(memory)
2646
+ cli.add_command(local_dump)
2647
+ cli.add_command(cluster_dump)
2648
+ cli.add_command(global_gc)
2649
+ cli.add_command(timeline)
2650
+ cli.add_command(install_nightly)
2651
+ cli.add_command(cpp)
2652
+ cli.add_command(disable_usage_stats)
2653
+ cli.add_command(enable_usage_stats)
2654
+ cli.add_command(metrics_group)
2655
+ cli.add_command(drain_node)
2656
+ cli.add_command(check_open_ports)
2657
+
2658
+ try:
2659
+ from ray.util.state.state_cli import (
2660
+ ray_get,
2661
+ ray_list,
2662
+ logs_state_cli_group,
2663
+ summary_state_cli_group,
2664
+ )
2665
+
2666
+ cli.add_command(ray_list, name="list")
2667
+ cli.add_command(ray_get, name="get")
2668
+ add_command_alias(summary_state_cli_group, name="summary", hidden=False)
2669
+ add_command_alias(logs_state_cli_group, name="logs", hidden=False)
2670
+ except ImportError as e:
2671
+ logger.debug(f"Integrating ray state command line tool failed: {e}")
2672
+
2673
+
2674
+ try:
2675
+ from ray.dashboard.modules.job.cli import job_cli_group
2676
+
2677
+ add_command_alias(job_cli_group, name="job", hidden=False)
2678
+ except Exception as e:
2679
+ logger.debug(f"Integrating ray jobs command line tool failed with {e}")
2680
+
2681
+
2682
+ try:
2683
+ from ray.serve.scripts import serve_cli
2684
+
2685
+ cli.add_command(serve_cli)
2686
+ except Exception as e:
2687
+ logger.debug(f"Integrating ray serve command line tool failed with {e}")
2688
+
2689
+
2690
+ def main():
2691
+ return cli()
2692
+
2693
+
2694
+ if __name__ == "__main__":
2695
+ main()
.venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/METADATA ADDED
@@ -0,0 +1,441 @@
1
+ Metadata-Version: 2.1
2
+ Name: colorama
3
+ Version: 0.4.6
4
+ Summary: Cross-platform colored terminal text.
5
+ Project-URL: Homepage, https://github.com/tartley/colorama
6
+ Author-email: Jonathan Hartley <tartley@tartley.com>
7
+ License-File: LICENSE.txt
8
+ Keywords: ansi,color,colour,crossplatform,terminal,text,windows,xplatform
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: Environment :: Console
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: License :: OSI Approved :: BSD License
13
+ Classifier: Operating System :: OS Independent
14
+ Classifier: Programming Language :: Python
15
+ Classifier: Programming Language :: Python :: 2
16
+ Classifier: Programming Language :: Python :: 2.7
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.7
19
+ Classifier: Programming Language :: Python :: 3.8
20
+ Classifier: Programming Language :: Python :: 3.9
21
+ Classifier: Programming Language :: Python :: 3.10
22
+ Classifier: Programming Language :: Python :: Implementation :: CPython
23
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
24
+ Classifier: Topic :: Terminals
25
+ Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7
26
+ Description-Content-Type: text/x-rst
27
+
28
+ .. image:: https://img.shields.io/pypi/v/colorama.svg
29
+ :target: https://pypi.org/project/colorama/
30
+ :alt: Latest Version
31
+
32
+ .. image:: https://img.shields.io/pypi/pyversions/colorama.svg
33
+ :target: https://pypi.org/project/colorama/
34
+ :alt: Supported Python versions
35
+
36
+ .. image:: https://github.com/tartley/colorama/actions/workflows/test.yml/badge.svg
37
+ :target: https://github.com/tartley/colorama/actions/workflows/test.yml
38
+ :alt: Build Status
39
+
40
+ Colorama
41
+ ========
42
+
43
+ Makes ANSI escape character sequences (for producing colored terminal text and
44
+ cursor positioning) work under MS Windows.
45
+
46
+ .. |donate| image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif
47
+ :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=2MZ9D2GMLYCUJ&item_name=Colorama&currency_code=USD
48
+ :alt: Donate with Paypal
49
+
50
+ `PyPI for releases <https://pypi.org/project/colorama/>`_ |
51
+ `Github for source <https://github.com/tartley/colorama>`_ |
52
+ `Colorama for enterprise on Tidelift <https://github.com/tartley/colorama/blob/master/ENTERPRISE.md>`_
53
+
54
+ If you find Colorama useful, please |donate| to the authors. Thank you!
55
+
56
+ Installation
57
+ ------------
58
+
59
+ Tested on CPython 2.7, 3.7, 3.8, 3.9 and 3.10 and PyPy 2.7 and 3.8.
60
+
61
+ No requirements other than the standard library.
62
+
63
+ .. code-block:: bash
64
+
65
+ pip install colorama
66
+ # or
67
+ conda install -c anaconda colorama
68
+
69
+ Description
70
+ -----------
71
+
72
+ ANSI escape character sequences have long been used to produce colored terminal
73
+ text and cursor positioning on Unix and Macs. Colorama makes this work on
74
+ Windows, too, by wrapping ``stdout``, stripping ANSI sequences it finds (which
75
+ would appear as gobbledygook in the output), and converting them into the
76
+ appropriate win32 calls to modify the state of the terminal. On other platforms,
77
+ Colorama does nothing.
78
+
79
+ This has the upshot of providing a simple cross-platform API for printing
80
+ colored terminal text from Python, and has the happy side-effect that existing
81
+ applications or libraries which use ANSI sequences to produce colored output on
82
+ Linux or Macs can now also work on Windows, simply by calling
83
+ ``colorama.just_fix_windows_console()`` (since v0.4.6) or ``colorama.init()``
84
+ (all versions, but may have other side-effects – see below).
85
+
86
+ An alternative approach is to install ``ansi.sys`` on Windows machines, which
87
+ provides the same behaviour for all applications running in terminals. Colorama
88
+ is intended for situations where that isn't easy (e.g., maybe your app doesn't
89
+ have an installer.)
90
+
91
+ Demo scripts in the source code repository print some colored text using
92
+ ANSI sequences. Compare their output under Gnome-terminal's built-in ANSI
93
+ handling, versus on Windows Command-Prompt using Colorama:
94
+
95
+ .. image:: https://github.com/tartley/colorama/raw/master/screenshots/ubuntu-demo.png
96
+ :width: 661
97
+ :height: 357
98
+ :alt: ANSI sequences on Ubuntu under gnome-terminal.
99
+
100
+ .. image:: https://github.com/tartley/colorama/raw/master/screenshots/windows-demo.png
101
+ :width: 668
102
+ :height: 325
103
+ :alt: Same ANSI sequences on Windows, using Colorama.
104
+
105
+ These screenshots show that, on Windows, Colorama does not support ANSI 'dim
106
+ text'; it looks the same as 'normal text'.
107
+
108
+ Usage
109
+ -----
110
+
111
+ Initialisation
112
+ ..............
113
+
114
+ If the only thing you want from Colorama is to get ANSI escapes to work on
115
+ Windows, then run:
116
+
117
+ .. code-block:: python
118
+
119
+ from colorama import just_fix_windows_console
120
+ just_fix_windows_console()
121
+
122
+ If you're on a recent version of Windows 10 or better, and your stdout/stderr
123
+ are pointing to a Windows console, then this will flip the magic configuration
124
+ switch to enable Windows' built-in ANSI support.
125
+
126
+ If you're on an older version of Windows, and your stdout/stderr are pointing to
127
+ a Windows console, then this will wrap ``sys.stdout`` and/or ``sys.stderr`` in a
128
+ magic file object that intercepts ANSI escape sequences and issues the
129
+ appropriate Win32 calls to emulate them.
130
+
131
+ In all other circumstances, it does nothing whatsoever. Basically the idea is
132
+ that this makes Windows act like Unix with respect to ANSI escape handling.
133
+
134
+ It's safe to call this function multiple times. It's safe to call this function
135
+ on non-Windows platforms, but it won't do anything. It's safe to call this
136
+ function when one or both of your stdout/stderr are redirected to a file – it
137
+ won't do anything to those streams.
138
+
139
+ Alternatively, you can use the older interface with more features (but also more
140
+ potential footguns):
141
+
142
+ .. code-block:: python
143
+
144
+ from colorama import init
145
+ init()
146
+
147
+ This does the same thing as ``just_fix_windows_console``, except for the
148
+ following differences:
149
+
150
+ - It's not safe to call ``init`` multiple times; you can end up with multiple
151
+ layers of wrapping and broken ANSI support.
152
+
153
+ - Colorama will apply a heuristic to guess whether stdout/stderr support ANSI,
154
+ and if it thinks they don't, then it will wrap ``sys.stdout`` and
155
+ ``sys.stderr`` in a magic file object that strips out ANSI escape sequences
156
+ before printing them. This happens on all platforms, and can be convenient if
157
+ you want to write your code to emit ANSI escape sequences unconditionally, and
158
+ let Colorama decide whether they should actually be output. But note that
159
+ Colorama's heuristic is not particularly clever.
160
+
161
+ - ``init`` also accepts explicit keyword args to enable/disable various
162
+ functionality – see below.
163
+
164
+ To stop using Colorama before your program exits, simply call ``deinit()``.
165
+ This will restore ``stdout`` and ``stderr`` to their original values, so that
166
+ Colorama is disabled. To resume using Colorama again, call ``reinit()``; it is
167
+ cheaper than calling ``init()`` again (but does the same thing).
168
+
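+ A minimal sketch of that lifecycle:
+
+ .. code-block:: python
+
+ from colorama import init, deinit, reinit
+
+ init() # start wrapping stdout/stderr
+ deinit() # restore the original streams
+ reinit() # re-enable wrapping, cheaper than init()
+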
169
+ Most users should depend on ``colorama >= 0.4.6``, and use
170
+ ``just_fix_windows_console``. The old ``init`` interface will be supported
171
+ indefinitely for backwards compatibility, but we don't plan to fix any issues
172
+ with it, also for backwards compatibility.
173
+
174
+ Colored Output
175
+ ..............
176
+
177
+ Cross-platform printing of colored text can then be done using Colorama's
178
+ constant shorthand for ANSI escape sequences. These are deliberately
179
+ rudimentary, see below.
180
+
181
+ .. code-block:: python
182
+
183
+ from colorama import Fore, Back, Style
184
+ print(Fore.RED + 'some red text')
185
+ print(Back.GREEN + 'and with a green background')
186
+ print(Style.DIM + 'and in dim text')
187
+ print(Style.RESET_ALL)
188
+ print('back to normal now')
189
+
190
+ ...or simply by manually printing ANSI sequences from your own code:
191
+
192
+ .. code-block:: python
193
+
194
+ print('\033[31m' + 'some red text')
195
+ print('\033[39m') # and reset to default color
196
+
197
+ ...or, Colorama can be used in conjunction with existing ANSI libraries
198
+ such as the venerable `Termcolor <https://pypi.org/project/termcolor/>`_,
199
+ the fabulous `Blessings <https://pypi.org/project/blessings/>`_,
200
+ or the incredible `Rich <https://pypi.org/project/rich/>`_.
201
+
202
+ If you wish Colorama's Fore, Back and Style constants were more capable,
203
+ then consider using one of the above highly capable libraries to generate
204
+ colors, etc, and use Colorama just for its primary purpose: to convert
205
+ those ANSI sequences to also work on Windows:
206
+
207
+ SIMILARLY, do not send PRs adding the generation of new ANSI types to Colorama.
208
+ We are only interested in converting ANSI codes to win32 API calls, not
209
+ shortcuts like the above to generate ANSI characters.
210
+
211
+ .. code-block:: python
212
+
213
+ from colorama import just_fix_windows_console
214
+ from termcolor import colored
215
+
216
+ # use Colorama to make Termcolor work on Windows too
217
+ just_fix_windows_console()
218
+
219
+ # then use Termcolor for all colored text output
220
+ print(colored('Hello, World!', 'green', 'on_red'))
221
+
222
+ Available formatting constants are::
223
+
224
+ Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
225
+ Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
226
+ Style: DIM, NORMAL, BRIGHT, RESET_ALL
227
+
228
+ ``Style.RESET_ALL`` resets foreground, background, and brightness. Colorama will
229
+ perform this reset automatically on program exit.
230
+
231
+ These are fairly well supported, but not part of the standard::
232
+
233
+ Fore: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
234
+ Back: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
235
+
236
+ Cursor Positioning
237
+ ..................
238
+
239
+ ANSI codes to reposition the cursor are supported. See ``demos/demo06.py`` for
240
+ an example of how to generate them.
241
+
242
+ Init Keyword Args
243
+ .................
244
+
245
+ ``init()`` accepts some ``**kwargs`` to override default behaviour.
246
+
247
+ init(autoreset=False):
248
+ If you find yourself repeatedly sending reset sequences to turn off color
249
+ changes at the end of every print, then ``init(autoreset=True)`` will
250
+ automate that:
251
+
252
+ .. code-block:: python
253
+
254
+ from colorama import init
255
+ init(autoreset=True)
256
+ print(Fore.RED + 'some red text')
257
+ print('automatically back to default color again')
258
+
259
+ init(strip=None):
260
+ Pass ``True`` or ``False`` to override whether ANSI codes should be
261
+ stripped from the output. The default behaviour is to strip if on Windows
262
+ or if output is redirected (not a tty).
263
+
264
+ init(convert=None):
265
+ Pass ``True`` or ``False`` to override whether to convert ANSI codes in the
266
+ output into win32 calls. The default behaviour is to convert if on Windows
267
+ and output is to a tty (terminal).
268
+
269
+ init(wrap=True):
270
+ On Windows, Colorama works by replacing ``sys.stdout`` and ``sys.stderr``
271
+ with proxy objects, which override the ``.write()`` method to do their work.
272
+ If this wrapping causes you problems, then this can be disabled by passing
273
+ ``init(wrap=False)``. The default behaviour is to wrap if ``autoreset`` or
274
+ ``strip`` or ``convert`` are True.
275
+
276
+ When wrapping is disabled, colored printing on non-Windows platforms will
277
+ continue to work as normal. To do cross-platform colored output, you can
278
+ use Colorama's ``AnsiToWin32`` proxy directly:
279
+
280
+ .. code-block:: python
281
+
282
+ import sys
283
+ from colorama import init, AnsiToWin32
284
+ init(wrap=False)
285
+ stream = AnsiToWin32(sys.stderr).stream
286
+
287
+ # Python 2
288
+ print >>stream, Fore.BLUE + 'blue text on stderr'
289
+
290
+ # Python 3
291
+ print(Fore.BLUE + 'blue text on stderr', file=stream)
292
+
293
+ Recognised ANSI Sequences
294
+ .........................
295
+
296
+ ANSI sequences generally take the form::
297
+
298
+ ESC [ <param> ; <param> ... <command>
299
+
300
+ Where ``<param>`` is an integer, and ``<command>`` is a single letter. Zero or
301
+ more params are passed to a ``<command>``. If no params are passed, it is
302
+ generally synonymous with passing a single zero. No spaces exist in the
303
+ sequence; they have been inserted here simply to read more easily.
304
+
305
+ The only ANSI sequences that Colorama converts into win32 calls are::
306
+
307
+ ESC [ 0 m # reset all (colors and brightness)
308
+ ESC [ 1 m # bright
309
+ ESC [ 2 m # dim (looks same as normal brightness)
310
+ ESC [ 22 m # normal brightness
311
+
312
+ # FOREGROUND:
313
+ ESC [ 30 m # black
314
+ ESC [ 31 m # red
315
+ ESC [ 32 m # green
316
+ ESC [ 33 m # yellow
317
+ ESC [ 34 m # blue
318
+ ESC [ 35 m # magenta
319
+ ESC [ 36 m # cyan
320
+ ESC [ 37 m # white
321
+ ESC [ 39 m # reset
322
+
323
+ # BACKGROUND
324
+ ESC [ 40 m # black
325
+ ESC [ 41 m # red
326
+ ESC [ 42 m # green
327
+ ESC [ 43 m # yellow
328
+ ESC [ 44 m # blue
329
+ ESC [ 45 m # magenta
330
+ ESC [ 46 m # cyan
331
+ ESC [ 47 m # white
332
+ ESC [ 49 m # reset
333
+
334
+ # cursor positioning
335
+ ESC [ y;x H # position cursor at x across, y down
336
+ ESC [ y;x f # position cursor at x across, y down
337
+ ESC [ n A # move cursor n lines up
338
+ ESC [ n B # move cursor n lines down
339
+ ESC [ n C # move cursor n characters forward
340
+ ESC [ n D # move cursor n characters backward
341
+
342
+ # clear the screen
343
+ ESC [ mode J # clear the screen
344
+
345
+ # clear the line
346
+ ESC [ mode K # clear the line
347
+
348
+ Multiple numeric params to the ``'m'`` command can be combined into a single
349
+ sequence::
350
+
351
+ ESC [ 36 ; 45 ; 1 m # bright cyan text on magenta background
352
+
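+ As a minimal sketch, that combined sequence is the string ``\033[36;45;1m``
+ when emitted from Python:
+
+ .. code-block:: python
+
+ print('\033[36;45;1m' + 'bright cyan on magenta' + '\033[0m')
+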
353
+ All other ANSI sequences of the form ``ESC [ <param> ; <param> ... <command>``
354
+ are silently stripped from the output on Windows.
355
+
356
+ Any other form of ANSI sequence, such as single-character codes or alternative
357
+ initial characters, are not recognised or stripped. It would be cool to add
358
+ them though. Let me know if it would be useful for you, via the Issues on
359
+ GitHub.
360
+
361
+ Status & Known Problems
362
+ -----------------------
363
+
364
+ I've personally only tested it on Windows XP (CMD, Console2), Ubuntu
365
+ (gnome-terminal, xterm), and OS X.
366
+
367
+ Some valid ANSI sequences aren't recognised.
368
+
369
+ If you're hacking on the code, see `README-hacking.md`_. ESPECIALLY, see the
370
+ explanation there of why we do not want PRs that allow Colorama to generate new
371
+ types of ANSI codes.
372
+
373
+ See outstanding issues and wish-list:
374
+ https://github.com/tartley/colorama/issues
375
+
376
+ If anything doesn't work for you, or doesn't do what you expected or hoped for,
377
+ I'd love to hear about it on that issues list, would be delighted by patches,
378
+ and would be happy to grant commit access to anyone who submits a working patch
379
+ or two.
380
+
381
+ .. _README-hacking.md: README-hacking.md
382
+
383
+ License
384
+ -------
385
+
386
+ Copyright Jonathan Hartley & Arnon Yaari, 2013-2020. BSD 3-Clause license; see
387
+ LICENSE file.
388
+
389
+ Professional support
390
+ --------------------
391
+
392
+ .. |tideliftlogo| image:: https://cdn2.hubspot.net/hubfs/4008838/website/logos/logos_for_download/Tidelift_primary-shorthand-logo.png
393
+ :alt: Tidelift
394
+ :target: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme
395
+
396
+ .. list-table::
397
+ :widths: 10 100
398
+
399
+ * - |tideliftlogo|
400
+ - Professional support for colorama is available as part of the
401
+ `Tidelift Subscription`_.
402
+ Tidelift gives software development teams a single source for purchasing
403
+ and maintaining their software, with professional grade assurances from
404
+ the experts who know it best, while seamlessly integrating with existing
405
+ tools.
406
+
407
+ .. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme
408
+
409
+ Thanks
410
+ ------
411
+
412
+ See the CHANGELOG for more thanks!
413
+
414
+ * Marc Schlaich (schlamar) for a ``setup.py`` fix for Python2.5.
415
+ * Marc Abramowitz, reported & fixed a crash on exit with closed ``stdout``,
416
+ providing a solution to issue #7's setuptools/distutils debate,
417
+ and other fixes.
418
+ * User 'eryksun', for guidance on correctly instantiating ``ctypes.windll``.
419
+ * Matthew McCormick for politely pointing out a longstanding crash on non-Win.
420
+ * Ben Hoyt, for a magnificent fix under 64-bit Windows.
421
+ * Jesse at Empty Square for submitting a fix for examples in the README.
422
+ * User 'jamessp', an observant documentation fix for cursor positioning.
423
+ * User 'vaal1239', Dave Mckee & Lackner Kristof for a tiny but much-needed Win7
424
+ fix.
425
+ * Julien Stuyck, for wisely suggesting Python3 compatible updates to README.
426
+ * Daniel Griffith for multiple fabulous patches.
427
+ * Oscar Lesta for a valuable fix to stop ANSI chars being sent to non-tty
428
+ output.
429
+ * Roger Binns, for many suggestions, valuable feedback, & bug reports.
430
+ * Tim Golden for thought and much appreciated feedback on the initial idea.
431
+ * User 'Zearin' for updates to the README file.
432
+ * John Szakmeister for adding support for light colors
433
+ * Charles Merriam for adding documentation to demos
434
+ * Jurko for a fix on 64-bit Windows CPython2.5 w/o ctypes
435
+ * Florian Bruhin for a fix when stdout or stderr are None
436
+ * Thomas Weininger for fixing ValueError on Windows
437
+ * Remi Rampin for better Github integration and fixes to the README file
438
+ * Simeon Visser for closing a file handle using 'with' and updating classifiers
439
+ to include Python 3.3 and 3.4
440
+ * Andy Neff for fixing RESET of LIGHT_EX colors.
441
+ * Jonathan Hartley for the initial idea and implementation.
.venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/RECORD ADDED
@@ -0,0 +1,32 @@
1
+ colorama-0.4.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ colorama-0.4.6.dist-info/METADATA,sha256=e67SnrUMOym9sz_4TjF3vxvAV4T3aF7NyqRHHH3YEMw,17158
3
+ colorama-0.4.6.dist-info/RECORD,,
4
+ colorama-0.4.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ colorama-0.4.6.dist-info/WHEEL,sha256=cdcF4Fbd0FPtw2EMIOwH-3rSOTUdTCeOSXRMD1iLUb8,105
6
+ colorama-0.4.6.dist-info/licenses/LICENSE.txt,sha256=ysNcAmhuXQSlpxQL-zs25zrtSWZW6JEQLkKIhteTAxg,1491
7
+ colorama/__init__.py,sha256=wePQA4U20tKgYARySLEC047ucNX-g8pRLpYBuiHlLb8,266
8
+ colorama/__pycache__/__init__.cpython-311.pyc,,
9
+ colorama/__pycache__/ansi.cpython-311.pyc,,
10
+ colorama/__pycache__/ansitowin32.cpython-311.pyc,,
11
+ colorama/__pycache__/initialise.cpython-311.pyc,,
12
+ colorama/__pycache__/win32.cpython-311.pyc,,
13
+ colorama/__pycache__/winterm.cpython-311.pyc,,
14
+ colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522
15
+ colorama/ansitowin32.py,sha256=vPNYa3OZbxjbuFyaVo0Tmhmy1FZ1lKMWCnT7odXpItk,11128
16
+ colorama/initialise.py,sha256=-hIny86ClXo39ixh5iSCfUIa2f_h_bgKRDW7gqs-KLU,3325
17
+ colorama/tests/__init__.py,sha256=MkgPAEzGQd-Rq0w0PZXSX2LadRWhUECcisJY8lSrm4Q,75
18
+ colorama/tests/__pycache__/__init__.cpython-311.pyc,,
19
+ colorama/tests/__pycache__/ansi_test.cpython-311.pyc,,
20
+ colorama/tests/__pycache__/ansitowin32_test.cpython-311.pyc,,
21
+ colorama/tests/__pycache__/initialise_test.cpython-311.pyc,,
22
+ colorama/tests/__pycache__/isatty_test.cpython-311.pyc,,
23
+ colorama/tests/__pycache__/utils.cpython-311.pyc,,
24
+ colorama/tests/__pycache__/winterm_test.cpython-311.pyc,,
25
+ colorama/tests/ansi_test.py,sha256=FeViDrUINIZcr505PAxvU4AjXz1asEiALs9GXMhwRaE,2839
26
+ colorama/tests/ansitowin32_test.py,sha256=RN7AIhMJ5EqDsYaCjVo-o4u8JzDD4ukJbmevWKS70rY,10678
27
+ colorama/tests/initialise_test.py,sha256=BbPy-XfyHwJ6zKozuQOvNvQZzsx9vdb_0bYXn7hsBTc,6741
28
+ colorama/tests/isatty_test.py,sha256=Pg26LRpv0yQDB5Ac-sxgVXG7hsA1NYvapFgApZfYzZg,1866
29
+ colorama/tests/utils.py,sha256=1IIRylG39z5-dzq09R_ngufxyPZxgldNbrxKxUGwGKE,1079
30
+ colorama/tests/winterm_test.py,sha256=qoWFPEjym5gm2RuMwpf3pOis3a5r_PJZFCzK254JL8A,3709
31
+ colorama/win32.py,sha256=YQOKwMTwtGBbsY4dL5HYTvwTeP9wIQra5MvPNddpxZs,6181
32
+ colorama/winterm.py,sha256=XCQFDHjPi6AHYNdZwy0tA02H-Jh48Jp-HvCjeLeLp3U,7134
.venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/REQUESTED ADDED
File without changes
.venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.11.1
3
+ Root-Is-Purelib: true
4
+ Tag: py2-none-any
5
+ Tag: py3-none-any
.venv/lib/python3.11/site-packages/ray/thirdparty_files/colorama-0.4.6.dist-info/licenses/LICENSE.txt ADDED
@@ -0,0 +1,27 @@
1
+ Copyright (c) 2010 Jonathan Hartley
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions are met:
6
+
7
+ * Redistributions of source code must retain the above copyright notice, this
8
+ list of conditions and the following disclaimer.
9
+
10
+ * Redistributions in binary form must reproduce the above copyright notice,
11
+ this list of conditions and the following disclaimer in the documentation
12
+ and/or other materials provided with the distribution.
13
+
14
+ * Neither the name of the copyright holders, nor those of its contributors
15
+ may be used to endorse or promote products derived from this software without
16
+ specific prior written permission.
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
19
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.venv/lib/python3.11/site-packages/ray/thirdparty_files/psutil/_psutil_linux.abi3.so ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d76b01666546f2806089d6f4ef08f005db0bb410d893a54d463a7b0f8b727e2
3
+ size 115336
.venv/lib/python3.11/site-packages/ray/thirdparty_files/setproctitle-1.2.2.dist-info/METADATA ADDED
@@ -0,0 +1,327 @@
1
+ Metadata-Version: 2.2
2
+ Name: setproctitle
3
+ Version: 1.2.2
4
+ Summary: A Python module to customize the process title
5
+ Home-page: https://github.com/dvarrazzo/py-setproctitle
6
+ Download-URL: http://pypi.python.org/pypi/setproctitle/
7
+ Author: Daniele Varrazzo
8
+ Author-email: daniele.varrazzo@gmail.com
9
+ License: BSD
10
+ Platform: GNU/Linux
11
+ Platform: BSD
12
+ Platform: MacOS X
13
+ Platform: Windows
14
+ Classifier: Development Status :: 5 - Production/Stable
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: License :: OSI Approved :: BSD License
17
+ Classifier: Programming Language :: C
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3.6
20
+ Classifier: Programming Language :: Python :: 3.7
21
+ Classifier: Programming Language :: Python :: 3.8
22
+ Classifier: Programming Language :: Python :: 3.9
23
+ Classifier: Operating System :: POSIX :: Linux
24
+ Classifier: Operating System :: POSIX :: BSD
25
+ Classifier: Operating System :: MacOS :: MacOS X
26
+ Classifier: Operating System :: Microsoft :: Windows
27
+ Classifier: Topic :: Software Development
28
+ Requires-Python: >=3.6
29
+ Description-Content-Type: text/x-rst
30
+ Provides-Extra: test
31
+ Requires-Dist: pytest<6.2,>=6.1; extra == "test"
32
+ Dynamic: author
33
+ Dynamic: author-email
34
+ Dynamic: classifier
35
+ Dynamic: description
36
+ Dynamic: description-content-type
37
+ Dynamic: download-url
38
+ Dynamic: home-page
39
+ Dynamic: license
40
+ Dynamic: platform
41
+ Dynamic: provides-extra
42
+ Dynamic: requires-python
43
+ Dynamic: summary
44
+
45
+ A Python module to customize the process title
46
+ ==============================================
47
+
48
+ .. image:: https://github.com/dvarrazzo/py-setproctitle/workflows/Tests/badge.svg
49
+ :target: https://github.com/dvarrazzo/py-setproctitle/actions?query=workflow%3ATests
50
+ :alt: Tests
51
+
52
+ :author: Daniele Varrazzo
53
+
54
+ The ``setproctitle`` module allows a process to change its title (as displayed
55
+ by system tools such as ``ps`` and ``top``).
56
+
57
+ Changing the title is mostly useful in multi-process systems, for example
58
+ when a master process is forked: changing the children's title allows to
59
+ identify the task each process is busy with. The technique is used by
60
+ PostgreSQL_ and the `OpenSSH Server`_ for example.
61
+
62
+ The procedure is hardly portable across different systems. PostgreSQL provides
63
+ a good `multi-platform implementation`__: this module is a Python wrapper
64
+ around PostgreSQL code.
65
+
66
+ - `Homepage <https://github.com/dvarrazzo/py-setproctitle>`__
67
+ - `Download <http://pypi.python.org/pypi/setproctitle/>`__
68
+ - `Bug tracker <https://github.com/dvarrazzo/py-setproctitle/issues>`__
69
+
70
+
71
+ .. _PostgreSQL: http://www.postgresql.org
72
+ .. _OpenSSH Server: http://www.openssh.com/
73
+ .. __: http://doxygen.postgresql.org/ps__status_8c_source.html
74
+
75
+
76
+ Installation
77
+ ------------
78
+
79
+ ``setproctitle`` is a C extension: in order to build it you will need a C
80
+ compiler and the Python development support (the ``python-dev`` or
81
+ ``python3-dev`` package in most Linux distributions). No further external
82
+ dependencies are required.
83
+
84
+ You can use ``pip`` to install the module::
85
+
86
+ pip install setproctitle
87
+
88
+ You can use ``pip -t`` or ``virtualenv`` for local installations, ``sudo pip``
89
+ for a system-wide one... the usual stuff. Read pip_ or virtualenv_ docs for
90
+ all the details.
91
+
92
+ .. _pip: https://pip.readthedocs.org/
93
+ .. _virtualenv: https://virtualenv.readthedocs.org/
94
+
95
+
96
+ Usage
97
+ -----
98
+
99
+ .. note::
100
+ You should import and use the module (even just calling ``getproctitle()``)
101
+ pretty early in your program lifetime: code writing env vars `may
102
+ interfere`__ with the module initialisation.
103
+
104
+ .. __: https://github.com/dvarrazzo/py-setproctitle/issues/42
105
+
106
+
107
+ The ``setproctitle`` module exports the following functions:
108
+
109
+ ``setproctitle(title)``
110
+ Set *title* as the title for the current process.
111
+
112
+ ``getproctitle()``
113
+ Return the current process title.
114
+
115
+ The process title is usually visible in files such as ``/proc/PID/cmdline``,
116
+ ``/proc/PID/status``, ``/proc/PID/comm``, depending on the operating system
117
+ and kernel version. These information are used by user-space tools such as
118
+ ``ps`` and ``top``.
119
+
120
+
121
+ ``setthreadtitle(title)``
122
+ Set *title* as the title for the current thread.
123
+
124
+ ``getthreadtitle()``
125
+ Get the current thread title.
126
+
127
+ The thread title is exposed by some operating systems as the file
128
+ ``/proc/PID/task/TID/comm``, which is used by certain tools such as ``htop``.
129
+
130
+
131
+ Environment variables
132
+ ~~~~~~~~~~~~~~~~~~~~~
133
+
134
+ A few environment variables can be used to customize the module behavior:
135
+
136
+ ``SPT_NOENV``
137
+ Avoid clobbering ``/proc/PID/environ``.
138
+
139
+ On many platforms, setting the process title will clobber the
140
+ ``environ`` memory area. ``os.environ`` will work as expected from within
141
+ the Python process, but the content of the file ``/proc/PID/environ`` will
142
+ be overwritten. If you require this file not to be broken you can set the
143
+ ``SPT_NOENV`` environment variable to any non-empty value: in this case
144
+ the maximum length for the title will be limited to the length of the
145
+ command line.
146
+
147
+ ``SPT_DEBUG``
148
+ Print debug information on ``stderr``.
149
+
150
+ If the module doesn't work as expected you can set this variable to a
151
+ non-empty value to generate information useful for debugging. Note that
152
+ the most useful information is printed when the module is imported, not
153
+ when the functions are called.
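+
+ For example, to inspect the module initialisation you might set the variable
+ before the first import (a sketch; normally you would set it in the shell)::
+
+     import os
+     os.environ["SPT_DEBUG"] = "1"   # must happen before importing setproctitle
+     import setproctitle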
+
+
+ Module status
+ -------------
+
+ The module can currently be compiled and effectively used on the following
+ platforms:
+
+ - GNU/Linux
+ - BSD
+ - Mac OS X
+ - Windows
+
+ Note that on Windows there is no way to change the process string: what the
+ module does is create a *Named Object* whose value can be read using a tool
+ such as `Process Explorer`_ (contributions of a more useful tool to use
+ together with ``setproctitle`` would be welcome).
+
+ The module can probably work on HP-UX, but I haven't found any machine to
+ test with. It is unlikely to work on Solaris, however.
+
+ .. _Process Explorer: http://technet.microsoft.com/en-us/sysinternals/bb896653.aspx
+
+
+ Releases history
+ ----------------
+
+ Version 1.2.2
+ ~~~~~~~~~~~~~
+
+ - Fixed Windows build (issues #89, #90).
+ - Added wheel packages for Windows (issues #47, #90).
+ - Added wheel packages for aarch64 (issue #95).
+
+
+ Version 1.2.1
+ ~~~~~~~~~~~~~
+
+ - Fixed segfault after ``os.environ.clear()`` (issue #88).
+
+
+ Version 1.2
+ ~~~~~~~~~~~
+
+ - Added ``getthreadtitle()`` and ``setthreadtitle()``.
+ - Initialisation of the module moved to the first usage: importing the
+   module doesn't cause side effects.
+ - Manage much longer command lines (issue #52).
+ - Improved build on BSD, dropped ancient versions (issue #67).
+ - Fixed build for Python 3.8 (issues #66, #72).
+ - Added support for Python 3.9.
+ - Dropped support for Python < 3.6.
+
+
+ Version 1.1.10
+ ~~~~~~~~~~~~~~
+
+ - Fixed building with certain ``prctl.h`` implementations (issue #44).
+ - Use ``setuptools`` if available (issue #48).
+
+
+ Version 1.1.9
+ ~~~~~~~~~~~~~
+
+ - Fixed build on VC (issues #20, #33).
+ - Added ``MANIFEST.in`` to the source distribution to help with RPM building
+   (issue #30).
+
+
+ Version 1.1.8
+ ~~~~~~~~~~~~~
+
+ - Added support for Python "diehard" 2.4 (pull request #3).
+ - Fixed build on Mac OS X 10.9 Mavericks (issue #27).
+
+
+ Version 1.1.7
+ ~~~~~~~~~~~~~
+
+ - Added PyPy support, courtesy of Ozan Turksever - http://www.logsign.net
+   (pull request #2).
+
+
+ Version 1.1.6
+ ~~~~~~~~~~~~~
+
+ - The module can be compiled again on Windows (issue #21).
+
+
+ Version 1.1.5
+ ~~~~~~~~~~~~~
+
+ - No module bug, but a packaging issue: files ``README`` and ``HISTORY``
+   added back into the distribution.
+
+
+ Version 1.1.4
+ ~~~~~~~~~~~~~
+
+ - The module works correctly in embedded Python.
+ - ``setproctitle()`` accepts a keyword argument.
+ - Debug output support is always compiled in: the variable ``SPT_DEBUG`` can
+   be used to emit a debug log.
+
+
+ Version 1.1.3
+ ~~~~~~~~~~~~~
+
+ - Don't clobber environ if the variable ``SPT_NOENV`` is set (issue #16).
+
+
+ Version 1.1.2
+ ~~~~~~~~~~~~~
+
+ - Find the setproctitle include file on OpenBSD (issue #11).
+ - Skip the unicode test if the file system encoding wouldn't make it pass
+   (issue #13).
+
+
+ Version 1.1.1
+ ~~~~~~~~~~~~~
+
+ - Fixed segfault when the module is imported under mod_wsgi (issue #9).
+
+
+ Version 1.1
+ ~~~~~~~~~~~
+
+ - The module works correctly with Python 3.
+
+
+ Version 1.0.1
+ ~~~~~~~~~~~~~
+
+ - ``setproctitle()`` works even when Python messes with ``argv``, e.g. when
+   run with the ``-m`` option (issue #8).
+
+
+ Version 1.0
+ ~~~~~~~~~~~
+
+ No major change since the previous version. The module has been heavily used
+ in production environments without any problem reported, so it's time to
+ declare it stable.
+
+
+ Version 0.4
+ ~~~~~~~~~~~
+
+ - The module works on BSD (tested on FreeBSD 7.2).
+
+ - The module works on Windows. Many thanks to `Develer`_ for providing a neat
+   `GCC package for Windows with Python integration`__ that made the Windows
+   porting painless.
+
+ .. _Develer: http://www.develer.com/
+ .. __: http://www.develer.com/oss/GccWinBinaries
+
+
+ Version 0.3
+ ~~~~~~~~~~~
+
+ - The module works on Mac OS X 10.2. Reported working on OS X 10.6 too.
+
+
+ Version 0.2
+ ~~~~~~~~~~~
+
+ - Added a ``prctl()`` call on Linux >= 2.6.9 to update ``/proc/self/status``.
+
+
+ Version 0.1
+ ~~~~~~~~~~~
+
+ - Initial public release.
.venv/lib/python3.11/site-packages/ray/thirdparty_files/setproctitle-1.2.2.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+ setproctitle-1.2.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ setproctitle-1.2.2.dist-info/METADATA,sha256=FFhJMjb-VT6QB94FksWKoQkg-sZy52OTBXC_KG3_hf8,9051
+ setproctitle-1.2.2.dist-info/RECORD,,
+ setproctitle-1.2.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ setproctitle-1.2.2.dist-info/WHEEL,sha256=HDBMQ19ZtMFARwRc3ZGC21o8Tu11IBK6tQtXGgqOICs,104
+ setproctitle-1.2.2.dist-info/top_level.txt,sha256=khlu2SuNEK2Ct594XhMTT62ll8p0oFza-4dmzUFTBhQ,13
+ setproctitle.cpython-311-x86_64-linux-gnu.so,sha256=sOhOZH3jZQWd3CIHW_KKe8qLZZm-YPo86kISUM-MZkM,69120
.venv/lib/python3.11/site-packages/yaml/__init__.py ADDED
@@ -0,0 +1,390 @@
+
+ from .error import *
+
+ from .tokens import *
+ from .events import *
+ from .nodes import *
+
+ from .loader import *
+ from .dumper import *
+
+ __version__ = '6.0.2'
+ try:
+     from .cyaml import *
+     __with_libyaml__ = True
+ except ImportError:
+     __with_libyaml__ = False
+
+ import io
+
+ #------------------------------------------------------------------------------
+ # XXX "Warnings control" is now deprecated. Leaving in the API function to not
+ # break code that uses it.
+ #------------------------------------------------------------------------------
+ def warnings(settings=None):
+     if settings is None:
+         return {}
+
+ #------------------------------------------------------------------------------
+ def scan(stream, Loader=Loader):
+     """
+     Scan a YAML stream and produce scanning tokens.
+     """
+     loader = Loader(stream)
+     try:
+         while loader.check_token():
+             yield loader.get_token()
+     finally:
+         loader.dispose()
+
+ def parse(stream, Loader=Loader):
+     """
+     Parse a YAML stream and produce parsing events.
+     """
+     loader = Loader(stream)
+     try:
+         while loader.check_event():
+             yield loader.get_event()
+     finally:
+         loader.dispose()
+
+ def compose(stream, Loader=Loader):
+     """
+     Parse the first YAML document in a stream
+     and produce the corresponding representation tree.
+     """
+     loader = Loader(stream)
+     try:
+         return loader.get_single_node()
+     finally:
+         loader.dispose()
+
+ def compose_all(stream, Loader=Loader):
+     """
+     Parse all YAML documents in a stream
+     and produce corresponding representation trees.
+     """
+     loader = Loader(stream)
+     try:
+         while loader.check_node():
+             yield loader.get_node()
+     finally:
+         loader.dispose()
+
+ def load(stream, Loader):
+     """
+     Parse the first YAML document in a stream
+     and produce the corresponding Python object.
+     """
+     loader = Loader(stream)
+     try:
+         return loader.get_single_data()
+     finally:
+         loader.dispose()
+
+ def load_all(stream, Loader):
+     """
+     Parse all YAML documents in a stream
+     and produce corresponding Python objects.
+     """
+     loader = Loader(stream)
+     try:
+         while loader.check_data():
+             yield loader.get_data()
+     finally:
+         loader.dispose()
+
+ def full_load(stream):
+     """
+     Parse the first YAML document in a stream
+     and produce the corresponding Python object.
+
+     Resolve all tags except those known to be
+     unsafe on untrusted input.
+     """
+     return load(stream, FullLoader)
+
+ def full_load_all(stream):
+     """
+     Parse all YAML documents in a stream
+     and produce corresponding Python objects.
+
+     Resolve all tags except those known to be
+     unsafe on untrusted input.
+     """
+     return load_all(stream, FullLoader)
+
+ def safe_load(stream):
+     """
+     Parse the first YAML document in a stream
+     and produce the corresponding Python object.
+
+     Resolve only basic YAML tags. This is known
+     to be safe for untrusted input.
+     """
+     return load(stream, SafeLoader)
+
+ def safe_load_all(stream):
+     """
+     Parse all YAML documents in a stream
+     and produce corresponding Python objects.
+
+     Resolve only basic YAML tags. This is known
+     to be safe for untrusted input.
+     """
+     return load_all(stream, SafeLoader)
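+
+ # Illustrative usage sketch (not part of the upstream module): safe_load
+ # accepts a str, bytes, or an open file object.
+ #
+ #     config = safe_load("retries: 3\nhosts: [a, b]")
+ #     assert config["retries"] == 3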
+
+ def unsafe_load(stream):
+     """
+     Parse the first YAML document in a stream
+     and produce the corresponding Python object.
+
+     Resolve all tags, even those known to be
+     unsafe on untrusted input.
+     """
+     return load(stream, UnsafeLoader)
+
+ def unsafe_load_all(stream):
+     """
+     Parse all YAML documents in a stream
+     and produce corresponding Python objects.
+
+     Resolve all tags, even those known to be
+     unsafe on untrusted input.
+     """
+     return load_all(stream, UnsafeLoader)
+
+ def emit(events, stream=None, Dumper=Dumper,
+         canonical=None, indent=None, width=None,
+         allow_unicode=None, line_break=None):
+     """
+     Emit YAML parsing events into a stream.
+     If stream is None, return the produced string instead.
+     """
+     getvalue = None
+     if stream is None:
+         stream = io.StringIO()
+         getvalue = stream.getvalue
+     dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+             allow_unicode=allow_unicode, line_break=line_break)
+     try:
+         for event in events:
+             dumper.emit(event)
+     finally:
+         dumper.dispose()
+     if getvalue:
+         return getvalue()
+
+ def serialize_all(nodes, stream=None, Dumper=Dumper,
+         canonical=None, indent=None, width=None,
+         allow_unicode=None, line_break=None,
+         encoding=None, explicit_start=None, explicit_end=None,
+         version=None, tags=None):
+     """
+     Serialize a sequence of representation trees into a YAML stream.
+     If stream is None, return the produced string instead.
+     """
+     getvalue = None
+     if stream is None:
+         if encoding is None:
+             stream = io.StringIO()
+         else:
+             stream = io.BytesIO()
+         getvalue = stream.getvalue
+     dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+             allow_unicode=allow_unicode, line_break=line_break,
+             encoding=encoding, version=version, tags=tags,
+             explicit_start=explicit_start, explicit_end=explicit_end)
+     try:
+         dumper.open()
+         for node in nodes:
+             dumper.serialize(node)
+         dumper.close()
+     finally:
+         dumper.dispose()
+     if getvalue:
+         return getvalue()
+
+ def serialize(node, stream=None, Dumper=Dumper, **kwds):
+     """
+     Serialize a representation tree into a YAML stream.
+     If stream is None, return the produced string instead.
+     """
+     return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+ def dump_all(documents, stream=None, Dumper=Dumper,
+         default_style=None, default_flow_style=False,
+         canonical=None, indent=None, width=None,
+         allow_unicode=None, line_break=None,
+         encoding=None, explicit_start=None, explicit_end=None,
+         version=None, tags=None, sort_keys=True):
+     """
+     Serialize a sequence of Python objects into a YAML stream.
+     If stream is None, return the produced string instead.
+     """
+     getvalue = None
+     if stream is None:
+         if encoding is None:
+             stream = io.StringIO()
+         else:
+             stream = io.BytesIO()
+         getvalue = stream.getvalue
+     dumper = Dumper(stream, default_style=default_style,
+             default_flow_style=default_flow_style,
+             canonical=canonical, indent=indent, width=width,
+             allow_unicode=allow_unicode, line_break=line_break,
+             encoding=encoding, version=version, tags=tags,
+             explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
+     try:
+         dumper.open()
+         for data in documents:
+             dumper.represent(data)
+         dumper.close()
+     finally:
+         dumper.dispose()
+     if getvalue:
+         return getvalue()
+
+ def dump(data, stream=None, Dumper=Dumper, **kwds):
+     """
+     Serialize a Python object into a YAML stream.
+     If stream is None, return the produced string instead.
+     """
+     return dump_all([data], stream, Dumper=Dumper, **kwds)
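+
+ # Illustrative sketch (not part of the upstream module): with stream=None,
+ # dump returns the document as a string; keyword options are forwarded to
+ # dump_all.
+ #
+ #     text = dump({"b": 2, "a": 1}, sort_keys=True)
+ #     # text == "a: 1\nb: 2\n"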
+
+ def safe_dump_all(documents, stream=None, **kwds):
+     """
+     Serialize a sequence of Python objects into a YAML stream.
+     Produce only basic YAML tags.
+     If stream is None, return the produced string instead.
+     """
+     return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+ def safe_dump(data, stream=None, **kwds):
+     """
+     Serialize a Python object into a YAML stream.
+     Produce only basic YAML tags.
+     If stream is None, return the produced string instead.
+     """
+     return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+ def add_implicit_resolver(tag, regexp, first=None,
+         Loader=None, Dumper=Dumper):
+     """
+     Add an implicit scalar detector.
+     If an implicit scalar value matches the given regexp,
+     the corresponding tag is assigned to the scalar.
+     first is a sequence of possible initial characters or None.
+     """
+     if Loader is None:
+         loader.Loader.add_implicit_resolver(tag, regexp, first)
+         loader.FullLoader.add_implicit_resolver(tag, regexp, first)
+         loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
+     else:
+         Loader.add_implicit_resolver(tag, regexp, first)
+     Dumper.add_implicit_resolver(tag, regexp, first)
+
+ def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
+     """
+     Add a path based resolver for the given tag.
+     A path is a list of keys that forms a path
+     to a node in the representation tree.
+     Keys can be string values, integers, or None.
+     """
+     if Loader is None:
+         loader.Loader.add_path_resolver(tag, path, kind)
+         loader.FullLoader.add_path_resolver(tag, path, kind)
+         loader.UnsafeLoader.add_path_resolver(tag, path, kind)
+     else:
+         Loader.add_path_resolver(tag, path, kind)
+     Dumper.add_path_resolver(tag, path, kind)
+
+ def add_constructor(tag, constructor, Loader=None):
+     """
+     Add a constructor for the given tag.
+     Constructor is a function that accepts a Loader instance
+     and a node object and produces the corresponding Python object.
+     """
+     if Loader is None:
+         loader.Loader.add_constructor(tag, constructor)
+         loader.FullLoader.add_constructor(tag, constructor)
+         loader.UnsafeLoader.add_constructor(tag, constructor)
+     else:
+         Loader.add_constructor(tag, constructor)
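+
+ # Illustrative sketch (not part of the upstream module): registering a
+ # constructor for a hypothetical "!upper" tag on the default loaders.
+ #
+ #     def construct_upper(loader, node):
+ #         return loader.construct_scalar(node).upper()
+ #
+ #     add_constructor("!upper", construct_upper)
+ #     full_load("greeting: !upper hello")  # {'greeting': 'HELLO'}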
+
+ def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
+     """
+     Add a multi-constructor for the given tag prefix.
+     Multi-constructor is called for a node if its tag starts with tag_prefix.
+     Multi-constructor accepts a Loader instance, a tag suffix,
+     and a node object and produces the corresponding Python object.
+     """
+     if Loader is None:
+         loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
+         loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
+         loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
+     else:
+         Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+ def add_representer(data_type, representer, Dumper=Dumper):
+     """
+     Add a representer for the given type.
+     Representer is a function accepting a Dumper instance
+     and an instance of the given data type
+     and producing the corresponding representation node.
+     """
+     Dumper.add_representer(data_type, representer)
+
+ def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+     """
+     Add a representer for the given type.
+     Multi-representer is a function accepting a Dumper instance
+     and an instance of the given data type or subtype
+     and producing the corresponding representation node.
+     """
+     Dumper.add_multi_representer(data_type, multi_representer)
+
+ class YAMLObjectMetaclass(type):
+     """
+     The metaclass for YAMLObject.
+     """
+     def __init__(cls, name, bases, kwds):
+         super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+         if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+             if isinstance(cls.yaml_loader, list):
+                 for loader in cls.yaml_loader:
+                     loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+             else:
+                 cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+
+             cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+ class YAMLObject(metaclass=YAMLObjectMetaclass):
+     """
+     An object that can dump itself to a YAML stream
+     and load itself from a YAML stream.
+     """
+
+     __slots__ = ()  # no direct instantiation, so allow immutable subclasses
+
+     yaml_loader = [Loader, FullLoader, UnsafeLoader]
+     yaml_dumper = Dumper
+
+     yaml_tag = None
+     yaml_flow_style = None
+
+     @classmethod
+     def from_yaml(cls, loader, node):
+         """
+         Convert a representation node to a Python object.
+         """
+         return loader.construct_yaml_object(node, cls)
+
+     @classmethod
+     def to_yaml(cls, dumper, data):
+         """
+         Convert a Python object to a representation node.
+         """
+         return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+                 flow_style=cls.yaml_flow_style)
+
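+ # Illustrative sketch (not part of the upstream module): a YAMLObject
+ # subclass gets its tag registered automatically by the metaclass; the
+ # Point class and '!Point' tag are hypothetical.
+ #
+ #     class Point(YAMLObject):
+ #         yaml_tag = '!Point'
+ #         def __init__(self, x, y):
+ #             self.x, self.y = x, y
+ #
+ #     p = load('!Point {x: 1, y: 2}', Loader=Loader)  # Point instance, p.x == 1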
.venv/lib/python3.11/site-packages/yaml/__pycache__/composer.cpython-311.pyc ADDED
Binary file (7.06 kB).
 
.venv/lib/python3.11/site-packages/yaml/__pycache__/constructor.cpython-311.pyc ADDED
Binary file (38.7 kB).
 
.venv/lib/python3.11/site-packages/yaml/__pycache__/cyaml.cpython-311.pyc ADDED
Binary file (5.4 kB).
 
.venv/lib/python3.11/site-packages/yaml/__pycache__/dumper.cpython-311.pyc ADDED
Binary file (2.87 kB).