BryanW commited on
Commit
91a59f1
·
verified ·
1 Parent(s): 6ee4204

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/__config__.cpython-312.pyc +0 -0
  2. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/__init__.cpython-312.pyc +0 -0
  3. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/_distributor_init.cpython-312.pyc +0 -0
  4. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/conftest.cpython-312.pyc +0 -0
  5. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/version.cpython-312.pyc +0 -0
  6. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__init__.py +14 -0
  7. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api.py +1036 -0
  8. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api_compat_vendor.py +9 -0
  9. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api_docs_tables.py +294 -0
  10. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api_no_0d.py +103 -0
  11. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api_override.py +150 -0
  12. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_bunch.py +229 -0
  13. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_ccallback.py +251 -0
  14. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_ccallback_c.cpython-312-x86_64-linux-gnu.so +0 -0
  15. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_disjoint_set.py +254 -0
  16. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_docscrape.py +761 -0
  17. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_elementwise_iterative_method.py +346 -0
  18. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_fpumode.cpython-312-x86_64-linux-gnu.so +0 -0
  19. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_gcutils.py +105 -0
  20. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_pep440.py +487 -0
  21. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_public_api.py +56 -0
  22. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_sparse.py +41 -0
  23. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_test_ccallback.cpython-312-x86_64-linux-gnu.so +0 -0
  24. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_test_deprecation_call.cpython-312-x86_64-linux-gnu.so +0 -0
  25. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_test_deprecation_def.cpython-312-x86_64-linux-gnu.so +0 -0
  26. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_testutils.py +373 -0
  27. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_tmpdirs.py +86 -0
  28. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_util.py +1251 -0
  29. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/deprecation.py +274 -0
  30. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/doccer.py +366 -0
  31. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/messagestream.cpython-312-x86_64-linux-gnu.so +0 -0
  32. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/uarray.py +31 -0
  33. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/cluster/__init__.py +31 -0
  34. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/cluster/hierarchy.py +0 -0
  35. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/cluster/vq.py +832 -0
  36. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/__init__.py +358 -0
  37. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/_codata.py +0 -0
  38. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/_constants.py +369 -0
  39. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/codata.py +21 -0
  40. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/constants.py +53 -0
  41. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/__init__.cpython-312.pyc +0 -0
  42. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/_download_all.cpython-312.pyc +0 -0
  43. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-312.pyc +0 -0
  44. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/_registry.cpython-312.pyc +0 -0
  45. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/_utils.cpython-312.pyc +0 -0
  46. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/tests/__init__.py +0 -0
  47. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-312.pyc +0 -0
  48. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-312.pyc +0 -0
  49. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/tests/test_data.py +128 -0
  50. URSA/.venv_ursa/lib/python3.12/site-packages/scipy/differentiate/__pycache__/__init__.cpython-312.pyc +0 -0
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/__config__.cpython-312.pyc ADDED
Binary file (5.11 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (4.21 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/_distributor_init.cpython-312.pyc ADDED
Binary file (865 Bytes). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/conftest.cpython-312.pyc ADDED
Binary file (26 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/__pycache__/version.cpython-312.pyc ADDED
Binary file (572 Bytes). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module containing private utility functions
3
+ ===========================================
4
+
5
+ The ``scipy._lib`` namespace is empty (for now). Tests for all
6
+ utilities in submodules of ``_lib`` can be run with::
7
+
8
+ from scipy import _lib
9
+ _lib.test()
10
+
11
+ """
12
+ from scipy._lib._testutils import PytestTester
13
+ test = PytestTester(__name__)
14
+ del PytestTester
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api.py ADDED
@@ -0,0 +1,1036 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utility functions to use Python Array API compatible libraries.
2
+
3
+ For the context about the Array API see:
4
+ https://data-apis.org/array-api/latest/purpose_and_scope.html
5
+
6
+ The SciPy use case of the Array API is described on the following page:
7
+ https://data-apis.org/array-api/latest/use_cases.html#use-case-scipy
8
+ """
9
+ import operator
10
+ import dataclasses
11
+ import functools
12
+ import textwrap
13
+
14
+ from collections.abc import Generator
15
+ from contextlib import contextmanager
16
+ from contextvars import ContextVar
17
+ from types import ModuleType
18
+ from typing import Any, Literal, TypeAlias
19
+ from collections.abc import Iterable
20
+
21
+ import numpy as np
22
+ import numpy.typing as npt
23
+
24
+ from scipy._lib.array_api_compat import (
25
+ is_array_api_obj,
26
+ is_lazy_array,
27
+ is_numpy_array,
28
+ is_cupy_array,
29
+ is_torch_array,
30
+ is_jax_array,
31
+ is_dask_array,
32
+ size as xp_size,
33
+ numpy as np_compat,
34
+ device as xp_device,
35
+ is_numpy_namespace as is_numpy,
36
+ is_cupy_namespace as is_cupy,
37
+ is_torch_namespace as is_torch,
38
+ is_jax_namespace as is_jax,
39
+ is_dask_namespace as is_dask,
40
+ is_array_api_strict_namespace as is_array_api_strict,
41
+ )
42
+ from scipy._lib.array_api_compat.common._helpers import _compat_module_name
43
+ from scipy._lib.array_api_extra.testing import lazy_xp_function
44
+ from scipy._lib._array_api_override import (
45
+ array_namespace, SCIPY_ARRAY_API, SCIPY_DEVICE
46
+ )
47
+ from scipy._lib._docscrape import FunctionDoc
48
+ from scipy._lib import array_api_extra as xpx
49
+
50
+
51
# Explicit public surface of this helper module (contents/order unchanged).
__all__ = [
    '_asarray', 'array_namespace', 'assert_almost_equal', 'assert_array_almost_equal',
    'default_xp', 'eager_warns', 'is_lazy_array', 'is_marray',
    'is_array_api_strict', 'is_complex', 'is_cupy', 'is_jax', 'is_numpy', 'is_torch',
    'np_compat', 'get_native_namespace_name',
    'SCIPY_ARRAY_API', 'SCIPY_DEVICE', 'scipy_namespace_for',
    'xp_assert_close', 'xp_assert_equal', 'xp_assert_less',
    'xp_copy', 'xp_device', 'xp_ravel', 'xp_size',
    'xp_unsupported_param_msg', 'xp_vector_norm', 'xp_capabilities',
    'xp_result_type', 'xp_promote',
    'make_xp_test_case', 'make_xp_pytest_marks', 'make_xp_pytest_param',
]


# `Array` should become a Protocol once array-api#589 is resolved.
Array: TypeAlias = Any
ArrayLike: TypeAlias = Array | npt.ArrayLike
67
+
68
+
69
+ def _check_finite(array: Array, xp: ModuleType) -> None:
70
+ """Check for NaNs or Infs."""
71
+ if not xp.all(xp.isfinite(array)):
72
+ msg = "array must not contain infs or NaNs"
73
+ raise ValueError(msg)
74
+
75
def _asarray(
    array: ArrayLike,
    dtype: Any = None,
    order: Literal['K', 'A', 'C', 'F'] | None = None,
    copy: bool | None = None,
    *,
    xp: ModuleType | None = None,
    check_finite: bool = False,
    subok: bool = False,
) -> Array:
    """SciPy-flavoured `np.asarray` with `order`, `check_finite` and `subok`.

    `order` is not part of the Array API standard: it is honoured only when
    the backing implementation is NumPy and silently ignored otherwise.
    `check_finite` spares callers a separate validation step, and `subok`
    reproduces the behaviour of `np.asanyarray` for NumPy-based inputs.
    """
    if xp is None:
        xp = array_namespace(array)

    if is_numpy(xp):
        # Only NumPy understands `order`/`subok`; use its native API.
        if copy is True:
            array = np.array(array, order=order, dtype=dtype, subok=subok)
        elif subok:
            array = np.asanyarray(array, order=order, dtype=dtype)
        else:
            array = np.asarray(array, order=order, dtype=dtype)
    else:
        try:
            array = xp.asarray(array, dtype=dtype, copy=copy)
        except TypeError:
            # Some namespaces reject foreign inputs; re-resolve the namespace
            # from a trivial array of `xp` and retry through it.
            fallback_xp = array_namespace(xp.asarray(3))
            array = fallback_xp.asarray(array, dtype=dtype, copy=copy)

    if check_finite:
        _check_finite(array, xp)

    return array
120
+
121
+
122
def xp_copy(x: Array, *, xp: ModuleType | None = None) -> Array:
    """Return a copy of array `x`.

    Parameters
    ----------
    x : array
    xp : array_namespace, optional
        Namespace of `x`; resolved automatically when omitted.

    Returns
    -------
    copy : array
        Copied array

    Notes
    -----
    Unlike `np.copy`, the `subok` and `order` semantics are not offered.
    """
    if xp is None:
        xp = array_namespace(x)

    # Older NumPy's `np.asarray` lacked the `copy` kwarg, so route through
    # our `_asarray` helper instead of calling `xp.asarray` directly.
    return _asarray(x, copy=True, xp=xp)
148
+
149
+
150
def _xp_copy_to_numpy(x: Array) -> np.ndarray:
    """Copy a possibly on-device array into a NumPy array (test-only helper).

    Intended solely for converting alternative-backend arrays to NumPy inside
    test code, so that use of the alternative backend stays isolated to the
    function under test. NEVER use this in production code: there, attempts
    to copy device arrays to NumPy should fail loudly, otherwise functions may
    appear to run on the GPU when they actually do not.

    Parameters
    ----------
    x : array

    Returns
    -------
    ndarray
    """
    xp = array_namespace(x)
    if is_numpy(xp):
        return x.copy()
    if is_cupy(xp):
        return x.get()
    if is_torch(xp):
        return x.cpu().numpy()
    if is_array_api_strict(xp):
        # array-api-strict supports multiple devices; move to the CPU device
        # first so NumPy can consume the buffer.
        on_cpu = xp.asarray(x, device=xp.Device("CPU_DEVICE"))
        return np.asarray(on_cpu, copy=True)
    # Fallback covers dask.array and, for now, jax.numpy; if JAX's transfer
    # guard becomes practical for scipy tests, JAX will need explicit
    # handling here, as may any newly added backend.
    return np.asarray(x, copy=True)
189
+
190
+
191
_default_xp_ctxvar: ContextVar[ModuleType] = ContextVar("_default_xp")


@contextmanager
def default_xp(xp: ModuleType) -> Generator[None, None, None]:
    """Set the default namespace expected by the ``xp_assert_*`` helpers.

    Inside this context manager, every ``xp_assert_*`` / ``assert_*`` call
    checks by default that all arrays belong to the given namespace, unless
    the caller passes ``xp=`` explicitly or ``check_namespace=False``.
    Outside of it, the default is the namespace of the desired array
    (the second argument of those helpers).
    """
    reset_token = _default_xp_ctxvar.set(xp)
    try:
        yield
    finally:
        # Always restore the previous value, even if the body raised.
        _default_xp_ctxvar.reset(reset_token)
208
+
209
+
210
def eager_warns(warning_type, *, match=None, xp):
    """pytest.warns context manager if arrays of specified namespace are always eager.

    Otherwise, a context manager that *ignores* the specified warning, since
    lazy backends may legitimately never emit it.
    """
    import pytest
    from scipy._lib._util import ignore_warns

    eager_backends = is_numpy(xp) or is_array_api_strict(xp) or is_cupy(xp)
    if eager_backends:
        return pytest.warns(warning_type, match=match)
    return ignore_warns(warning_type, match='' if match is None else match)
220
+
221
+
222
def _strict_check(actual, desired, xp, *,
                  check_namespace=True, check_dtype=True, check_shape=True,
                  check_0d=True):
    """Normalise/validate a pair of arrays for the ``xp_assert_*`` helpers.

    Returns ``(actual, desired, xp)`` with both operands coerced to `xp`
    arrays and `desired` broadcast to `actual`'s shape.
    """
    __tracebackhide__ = True  # Hide traceback for py.test

    if xp is None:
        # Fall back to the `default_xp` context, then to `desired`'s namespace.
        try:
            xp = _default_xp_ctxvar.get()
        except LookupError:
            xp = array_namespace(desired)

    if check_namespace:
        _assert_matching_namespace(actual, desired, xp)

    # Only NumPy distinguishes scalars from arrays; honour that (when
    # check_0d is set) before coercing both operands to arrays below.
    if is_numpy(xp) and check_0d:
        message = ("Array-ness does not match:\n Actual: "
                   f"{type(actual)}\n Desired: {type(desired)}")
        both_scalar = xp.isscalar(actual) and xp.isscalar(desired)
        neither_scalar = not xp.isscalar(actual) and not xp.isscalar(desired)
        assert both_scalar or neither_scalar, message

    actual = xp.asarray(actual)
    desired = xp.asarray(desired)

    if check_dtype:
        message = f"dtypes do not match.\nActual: {actual.dtype}\nDesired: {desired.dtype}"
        assert actual.dtype == desired.dtype, message

    if check_shape:
        if is_dask(xp):
            # Dask shapes may be unknown until chunk sizes are materialised.
            actual.compute_chunk_sizes()
            desired.compute_chunk_sizes()
        message = f"Shapes do not match.\nActual: {actual.shape}\nDesired: {desired.shape}"
        assert actual.shape == desired.shape, message

    desired = xp.broadcast_to(desired, actual.shape)
    return actual, desired, xp
260
+
261
+
262
def _assert_matching_namespace(actual, desired, xp):
    """Assert that both operands live in the expected namespace `xp`."""
    __tracebackhide__ = True  # Hide traceback for py.test

    desired_space = array_namespace(desired)
    message = ("Namespace of desired array does not match expectations "
               "set by the `default_xp` context manager or by the `xp`"
               "pytest fixture.\n"
               f"Desired array's space: {desired_space.__name__}\n"
               f"Expected namespace: {xp.__name__}")
    assert desired_space == xp, message

    actual_space = array_namespace(actual)
    message = ("Namespace of actual and desired arrays do not match.\n"
               f"Actual: {actual_space.__name__}\n"
               f"Desired: {xp.__name__}")
    assert actual_space == xp, message
278
+
279
+
280
def xp_assert_equal(actual, desired, *, check_namespace=True, check_dtype=True,
                    check_shape=True, check_0d=True, err_msg='', xp=None):
    """Assert exact element-wise equality across Array API backends."""
    __tracebackhide__ = True  # Hide traceback for py.test

    actual, desired, xp = _strict_check(
        actual, desired, xp, check_namespace=check_namespace,
        check_dtype=check_dtype, check_shape=check_shape,
        check_0d=check_0d
    )

    if is_cupy(xp):
        return xp.testing.assert_array_equal(actual, desired, err_msg=err_msg)
    if is_torch(xp):
        # PyTorch recommends `rtol=0, atol=0` as the exact-equality spelling.
        msg = None if err_msg == '' else err_msg
        return xp.testing.assert_close(actual, desired, rtol=0, atol=0,
                                       equal_nan=True, check_dtype=False, msg=msg)
    # NumPy and JAX both go through `np.testing`.
    return np.testing.assert_array_equal(actual, desired, err_msg=err_msg)
300
+
301
+
302
def xp_assert_close(actual, desired, *, rtol=None, atol=0, check_namespace=True,
                    check_dtype=True, check_shape=True, check_0d=True,
                    err_msg='', xp=None):
    """Assert element-wise closeness across Array API backends."""
    __tracebackhide__ = True  # Hide traceback for py.test

    actual, desired, xp = _strict_check(
        actual, desired, xp,
        check_namespace=check_namespace, check_dtype=check_dtype,
        check_shape=check_shape, check_0d=check_0d
    )

    floating = xp.isdtype(actual.dtype, ('real floating', 'complex floating'))
    if rtol is None:
        if floating:
            # The factor of 4 puts the float64 default roughly halfway between
            # sqrt(eps) and `numpy.testing.assert_allclose`'s default of 1e-7.
            rtol = xp.finfo(actual.dtype).eps**0.5 * 4
        else:
            rtol = 1e-7

    if is_cupy(xp):
        return xp.testing.assert_allclose(actual, desired, rtol=rtol,
                                          atol=atol, err_msg=err_msg)
    if is_torch(xp):
        msg = None if err_msg == '' else err_msg
        return xp.testing.assert_close(actual, desired, rtol=rtol, atol=atol,
                                       equal_nan=True, check_dtype=False, msg=msg)
    # NumPy and JAX both go through `np.testing`.
    return np.testing.assert_allclose(actual, desired, rtol=rtol,
                                      atol=atol, err_msg=err_msg)
332
+
333
+
334
def xp_assert_close_nulp(actual, desired, *, nulp=1, check_namespace=True,
                         check_dtype=True, check_shape=True, check_0d=True,
                         err_msg='', xp=None):
    """Assert closeness within `nulp` units in the last place (via NumPy)."""
    __tracebackhide__ = True  # Hide traceback for py.test

    actual, desired, xp = _strict_check(
        actual, desired, xp,
        check_namespace=check_namespace, check_dtype=check_dtype,
        check_shape=check_shape, check_0d=check_0d
    )

    # `assert_array_almost_equal_nulp` only exists in NumPy, so copy both
    # operands off-device first (test-only helper, see _xp_copy_to_numpy).
    actual = _xp_copy_to_numpy(actual)
    desired = _xp_copy_to_numpy(desired)
    return np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=nulp)
347
+
348
+
349
def xp_assert_less(actual, desired, *, check_namespace=True, check_dtype=True,
                   check_shape=True, check_0d=True, err_msg='', verbose=True, xp=None):
    """Assert `actual` is element-wise strictly less than `desired`."""
    __tracebackhide__ = True  # Hide traceback for py.test

    actual, desired, xp = _strict_check(
        actual, desired, xp, check_namespace=check_namespace,
        check_dtype=check_dtype, check_shape=check_shape,
        check_0d=check_0d
    )

    if is_cupy(xp):
        return xp.testing.assert_array_less(actual, desired,
                                            err_msg=err_msg, verbose=verbose)
    if is_torch(xp):
        # `np.testing` below needs CPU tensors; move off-device if required.
        if actual.device.type != 'cpu':
            actual = actual.cpu()
        if desired.device.type != 'cpu':
            desired = desired.cpu()
    # NumPy, JAX (and CPU torch tensors) go through `np.testing`.
    return np.testing.assert_array_less(actual, desired,
                                        err_msg=err_msg, verbose=verbose)
370
+
371
+
372
def assert_array_almost_equal(actual, desired, decimal=6, *args, **kwds):
    """Backwards compatible replacement. In new code, use xp_assert_close instead.
    """
    # Translate the legacy `decimal` argument into an absolute tolerance.
    atol = 1.5 * 10 ** (-decimal)
    return xp_assert_close(actual, desired,
                           atol=atol, rtol=0, check_dtype=False, check_shape=False,
                           *args, **kwds)
379
+
380
+
381
def assert_almost_equal(actual, desired, decimal=7, *args, **kwds):
    """Backwards compatible replacement. In new code, use xp_assert_close instead.
    """
    # Translate the legacy `decimal` argument into an absolute tolerance.
    atol = 1.5 * 10 ** (-decimal)
    return xp_assert_close(actual, desired,
                           atol=atol, rtol=0, check_dtype=False, check_shape=False,
                           *args, **kwds)
388
+
389
+
390
def xp_unsupported_param_msg(param: Any) -> str:
    """Standard message for parameters honoured only on NumPy inputs."""
    return "Providing " + repr(param) + " is only supported for numpy arrays."
392
+
393
+
394
+ def is_complex(x: Array, xp: ModuleType) -> bool:
395
+ return xp.isdtype(x.dtype, 'complex floating')
396
+
397
+
398
def get_native_namespace_name(xp: ModuleType) -> str:
    """Return name for native namespace (without the array_api_compat prefix)."""
    prefix = f"{_compat_module_name()}."
    return xp.__name__.removeprefix(prefix)
402
+
403
+
404
def scipy_namespace_for(xp: ModuleType) -> ModuleType | None:
    """Return the `scipy`-like namespace of a non-NumPy backend.

    That is, the namespace matching backend `xp` that hosts `scipy`-style
    sub-namespaces such as `linalg` and `special`. Returns ``None`` when no
    such namespace exists. Useful for dispatching.
    """
    if is_cupy(xp):
        import cupyx  # type: ignore[import-not-found,import-untyped]
        return cupyx.scipy

    if is_jax(xp):
        import jax  # type: ignore[import-not-found]
        return jax.scipy

    if is_torch(xp):
        # torch itself exposes `linalg`, `special`, etc.
        return xp

    return None
424
+
425
+
426
# maybe use `scipy.linalg` if/when array API support is added
def xp_vector_norm(x: Array, /, *,
                   axis: int | tuple[int] | None = None,
                   keepdims: bool = False,
                   ord: int | float = 2,
                   xp: ModuleType | None = None) -> Array:
    """Vector norm across Array API backends (Euclidean unless `linalg`)."""
    if xp is None:
        xp = array_namespace(x)

    if not SCIPY_ARRAY_API:
        # Preserve the historical behaviour: always defer to NumPy.
        return np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)

    # Prefer the optional `linalg` extension when the backend provides it.
    if hasattr(xp, 'linalg'):
        return xp.linalg.vector_norm(x, axis=axis, keepdims=keepdims, ord=ord)

    if ord != 2:
        raise ValueError(
            "only the Euclidean norm (`ord=2`) is currently supported in "
            "`xp_vector_norm` for backends not implementing the `linalg` "
            "extension."
        )
    # Equivalent to (x @ x)**0.5, but correct for n-d and complex arrays.
    return xp.sum(xp.conj(x) * x, axis=axis, keepdims=keepdims)**0.5
451
+
452
+
453
+ def xp_ravel(x: Array, /, *, xp: ModuleType | None = None) -> Array:
454
+ # Equivalent of np.ravel written in terms of array API
455
+ # Even though it's one line, it comes up so often that it's worth having
456
+ # this function for readability
457
+ xp = array_namespace(x) if xp is None else xp
458
+ return xp.reshape(x, (-1,))
459
+
460
+
461
+ def xp_swapaxes(a, axis1, axis2, xp=None):
462
+ # Equivalent of np.swapaxes written in terms of array API
463
+ xp = array_namespace(a) if xp is None else xp
464
+ axes = list(range(a.ndim))
465
+ axes[axis1], axes[axis2] = axes[axis2], axes[axis1]
466
+ a = xp.permute_dims(a, axes)
467
+ return a
468
+
469
+
470
# utility to find common dtype with option to force floating
def xp_result_type(*args, force_floating=False, xp):
    """
    Return the dtype produced by applying the Array API Standard type
    promotion rules to `args`, with a few extensions over plain
    `xp.result_type`:

    - `force_floating=True` guarantees a floating-point result even when
      every argument is integer.
    - If promotion raises `TypeError` (mixed-type promotion undefined) and
      `force_floating=True`, a custom rule applies: promote the namespace's
      default float with any floats present. Rationale:
      https://github.com/scipy/scipy/pull/22695/files#r1997905891
    - Array-like iterables are converted to `xp` arrays before promotion, so
      the result may differ between e.g. `1.` and `[1.]`.

    Typically called shortly after `array_namespace` on a subset of its
    arguments.
    """
    # Convert iterables up front (prevents double conversion later).
    # `np.iterable` is skipped for torch tensors (pytorch/pytorch#143334);
    # `array_api_compat.is_array_api_obj` is unsuitable as it accepts
    # NumPy scalars.
    args = [(_asarray(arg, subok=True, xp=xp)
             if is_torch_array(arg) or np.iterable(arg) else arg)
            for arg in args]
    args_not_none = [arg for arg in args if arg is not None]
    if force_floating:
        args_not_none.append(1.0)

    if is_numpy(xp) and xp.__version__ < '2.0':
        # Emulate NEP 50 on NumPy 1.x: size-1 arrays promote by dtype only.
        args_not_none = [arg.dtype if getattr(arg, 'size', 0) == 1 else arg
                         for arg in args_not_none]
        return xp.result_type(*args_not_none)

    try:
        # Follow the library's own promotion rules when defined.
        return xp.result_type(*args_not_none)
    except TypeError:
        if not force_floating:
            raise
        # Custom rule: result type of the default float plus any floats
        # present. Currently only array-api-strict (not for production) and
        # PyTorch reach this branch (data-apis/array-api-compat#279).
        float_args = []
        for arg in args_not_none:
            arg_array = xp.asarray(arg) if np.isscalar(arg) else arg
            dtype = getattr(arg_array, 'dtype', arg)
            if xp.isdtype(dtype, ('real floating', 'complex floating')):
                float_args.append(arg)
        return xp.result_type(*float_args, xp_default_dtype(xp))
522
+
523
+
524
def xp_promote(*args, broadcast=False, force_floating=False, xp):
    """
    Promote the elements of `args` to their common result dtype, skipping
    `None`s. Optionally force a floating-point result and/or broadcast all
    arrays (again skipping `None`s). Promotion follows `xp_result_type`
    rather than plain `xp.result_type`.

    Array-like iterables are converted to `xp` arrays before promotion, so
    the result dtype may differ between e.g. `1.` and `[1.]`.

    Typically called shortly after `array_namespace` on a subset of its
    arguments.

    See Also
    --------
    xp_result_type
    """
    if not args:
        return args

    # Convert iterables once, here (prevents double conversion below).
    # `np.iterable` is skipped for torch tensors (pytorch/pytorch#143334);
    # `is_array_api_obj` is unsuitable as it accepts NumPy scalars.
    args = [(_asarray(arg, subok=True, xp=xp)
             if is_torch_array(arg) or np.iterable(arg) else arg)
            for arg in args]

    dtype = xp_result_type(*args, force_floating=force_floating, xp=xp)

    args = [(arg if arg is None
             else _asarray(arg, dtype=dtype, subok=True, xp=xp))
            for arg in args]

    if not broadcast:
        return args[0] if len(args) == 1 else tuple(args)

    args_not_none = [arg for arg in args if arg is not None]

    # Determine the common result shape.
    shapes = {arg.shape for arg in args_not_none}
    try:
        if len(shapes) == 1:
            shape = args_not_none[0].shape
        else:
            shape = np.broadcast_shapes(*shapes)
    except ValueError as e:
        message = "Array shapes are incompatible for broadcasting."
        raise ValueError(message) from e

    out = []
    for arg in args:
        if arg is None:
            out.append(arg)
            continue

        # Broadcast lazily, only when needed: even with several arguments to
        # broadcast this beats `broadcast_arrays`, since `shape` is known.
        if arg.shape != shape:
            kwargs = {'subok': True} if is_numpy(xp) else {}
            arg = xp.broadcast_to(arg, shape, **kwargs)

        # Much faster than xp.astype(arg, dtype, copy=False).
        if arg.dtype != dtype:
            arg = xp.astype(arg, dtype)

        out.append(arg)

    return out[0] if len(out) == 1 else tuple(out)
590
+
591
+
592
+ def xp_float_to_complex(arr: Array, xp: ModuleType | None = None) -> Array:
593
+ xp = array_namespace(arr) if xp is None else xp
594
+ arr_dtype = arr.dtype
595
+ # The standard float dtypes are float32 and float64.
596
+ # Convert float32 to complex64,
597
+ # and float64 (and non-standard real dtypes) to complex128
598
+ if xp.isdtype(arr_dtype, xp.float32):
599
+ arr = xp.astype(arr, xp.complex64)
600
+ elif xp.isdtype(arr_dtype, 'real floating'):
601
+ arr = xp.astype(arr, xp.complex128)
602
+
603
+ return arr
604
+
605
+
606
def xp_default_dtype(xp):
    """Query the namespace-dependent default floating-point dtype."""
    if is_torch(xp):
        # Historically pytorch is allowed to keep its float32 default.
        return xp.get_default_dtype()
    # Every other backend defaults to float64.
    return xp.float64
615
+
616
+
617
### MArray Helpers ###
def xp_result_device(*args):
    """Return the device of an array in `args`, for input-output device
    propagation. With multiple devices an arbitrary one is returned; with no
    arrays at all, None (typically only happens on NumPy).
    """
    for arg in args:
        # No duck-typing on a `.device` attribute: many backends still lack
        # it (see workarounds in array_api_compat.device()).
        if is_array_api_obj(arg):
            return xp_device(arg)
    return None
630
+
631
+
632
+ # np.r_ replacement
633
+ def concat_1d(xp: ModuleType | None, *arrays: Iterable[ArrayLike]) -> Array:
634
+ """A replacement for `np.r_` as `xp.concat` does not accept python scalars
635
+ or 0-D arrays.
636
+ """
637
+ arys = [xpx.atleast_nd(xp.asarray(a), ndim=1, xp=xp) for a in arrays]
638
+ return xp.concat(arys)
639
+
640
+
641
+ def is_marray(xp):
642
+ """Returns True if `xp` is an MArray namespace; False otherwise."""
643
+ return "marray" in xp.__name__
644
+
645
+
646
+ def _length_nonmasked(x, axis, keepdims=False, xp=None):
647
+ xp = array_namespace(x) if xp is None else xp
648
+ if is_marray(xp):
649
+ if np.iterable(axis):
650
+ message = '`axis` must be an integer or None for use with `MArray`.'
651
+ raise NotImplementedError(message)
652
+ return xp.astype(xp.count(x, axis=axis, keepdims=keepdims), x.dtype)
653
+ return (xp_size(x) if axis is None else
654
+ # compact way to deal with axis tuples or ints
655
+ int(np.prod(np.asarray(x.shape)[np.asarray(axis)])))
656
+
657
+
658
+ def _share_masks(*args, xp):
659
+ if is_marray(xp):
660
+ mask = functools.reduce(operator.or_, (arg.mask for arg in args))
661
+ args = [xp.asarray(arg.data, mask=mask) for arg in args]
662
+ return args[0] if len(args) == 1 else args
663
+
664
+ ### End MArray Helpers ###
665
+
666
+
667
+ @dataclasses.dataclass(repr=False)
668
+ class _XPSphinxCapability:
669
+ cpu: bool | None # None if not applicable
670
+ gpu: bool | None
671
+ warnings: list[str] = dataclasses.field(default_factory=list)
672
+
673
+ def _render(self, value):
674
+ if value is None:
675
+ return "n/a"
676
+ if not value:
677
+ return "⛔"
678
+ if self.warnings:
679
+ res = "⚠️ " + '; '.join(self.warnings)
680
+ assert len(res) <= 20, "Warnings too long"
681
+ return res
682
+ return "✅"
683
+
684
+ def __str__(self):
685
+ cpu = self._render(self.cpu)
686
+ gpu = self._render(self.gpu)
687
+ return f"{cpu:20} {gpu:20}"
688
+
689
+
690
+ def _make_sphinx_capabilities(
691
+ # lists of tuples [(module name, reason), ...]
692
+ skip_backends=(), xfail_backends=(),
693
+ # @pytest.mark.skip/xfail_xp_backends kwargs
694
+ cpu_only=False, np_only=False, out_of_scope=False, exceptions=(),
695
+ # xpx.lazy_xp_backends kwargs
696
+ allow_dask_compute=False, jax_jit=True,
697
+ # list of tuples [(module name, reason), ...]
698
+ warnings = (),
699
+ # unused in documentation
700
+ reason=None,
701
+ ):
702
+ if out_of_scope:
703
+ return {"out_of_scope": True}
704
+
705
+ exceptions = set(exceptions)
706
+
707
+ # Default capabilities
708
+ capabilities = {
709
+ "numpy": _XPSphinxCapability(cpu=True, gpu=None),
710
+ "array_api_strict": _XPSphinxCapability(cpu=True, gpu=None),
711
+ "cupy": _XPSphinxCapability(cpu=None, gpu=True),
712
+ "torch": _XPSphinxCapability(cpu=True, gpu=True),
713
+ "jax.numpy": _XPSphinxCapability(cpu=True, gpu=True,
714
+ warnings=[] if jax_jit else ["no JIT"]),
715
+ # Note: Dask+CuPy is currently untested and unsupported
716
+ "dask.array": _XPSphinxCapability(cpu=True, gpu=None,
717
+ warnings=["computes graph"] if allow_dask_compute else []),
718
+ }
719
+
720
+ # documentation doesn't display the reason
721
+ for module, _ in list(skip_backends) + list(xfail_backends):
722
+ backend = capabilities[module]
723
+ if backend.cpu is not None:
724
+ backend.cpu = False
725
+ if backend.gpu is not None:
726
+ backend.gpu = False
727
+
728
+ for module, backend in capabilities.items():
729
+ if np_only and module not in exceptions | {"numpy"}:
730
+ if backend.cpu is not None:
731
+ backend.cpu = False
732
+ if backend.gpu is not None:
733
+ backend.gpu = False
734
+ elif cpu_only and module not in exceptions and backend.gpu is not None:
735
+ backend.gpu = False
736
+
737
+ for module, warning in warnings:
738
+ backend = capabilities[module]
739
+ backend.warnings.append(warning)
740
+
741
+ return capabilities
742
+
743
+
744
+ def _make_capabilities_note(fun_name, capabilities, extra_note=None):
745
+ if "out_of_scope" in capabilities:
746
+ # It will be better to link to a section of the dev-arrayapi docs
747
+ # that explains what is and isn't in-scope, but such a section
748
+ # doesn't exist yet. Using :ref:`dev-arrayapi` as a placeholder.
749
+ note = f"""
750
+ **Array API Standard Support**
751
+
752
+ `{fun_name}` is not in-scope for support of Python Array API Standard compatible
753
+ backends other than NumPy.
754
+
755
+ See :ref:`dev-arrayapi` for more information.
756
+ """
757
+ return textwrap.dedent(note)
758
+
759
+ # Note: deliberately not documenting array-api-strict
760
+ note = f"""
761
+ **Array API Standard Support**
762
+
763
+ `{fun_name}` has experimental support for Python Array API Standard compatible
764
+ backends in addition to NumPy. Please consider testing these features
765
+ by setting an environment variable ``SCIPY_ARRAY_API=1`` and providing
766
+ CuPy, PyTorch, JAX, or Dask arrays as array arguments. The following
767
+ combinations of backend and device (or other capability) are supported.
768
+
769
+ ==================== ==================== ====================
770
+ Library CPU GPU
771
+ ==================== ==================== ====================
772
+ NumPy {capabilities['numpy'] }
773
+ CuPy {capabilities['cupy'] }
774
+ PyTorch {capabilities['torch'] }
775
+ JAX {capabilities['jax.numpy'] }
776
+ Dask {capabilities['dask.array'] }
777
+ ==================== ==================== ====================
778
+
779
+ """ + (extra_note or "") + " See :ref:`dev-arrayapi` for more information."
780
+
781
+ return textwrap.dedent(note)
782
+
783
+
784
+ def xp_capabilities(
785
+ *,
786
+ # Alternative capabilities table.
787
+ # Used only for testing this decorator.
788
+ capabilities_table=None,
789
+ # Generate pytest.mark.skip/xfail_xp_backends.
790
+ # See documentation in conftest.py.
791
+ # lists of tuples [(module name, reason), ...]
792
+ skip_backends=(), xfail_backends=(),
793
+ cpu_only=False, np_only=False, reason=None,
794
+ out_of_scope=False, exceptions=(),
795
+ # lists of tuples [(module name, reason), ...]
796
+ warnings=(),
797
+ # xpx.testing.lazy_xp_function kwargs.
798
+ # Refer to array-api-extra documentation.
799
+ allow_dask_compute=False, jax_jit=True,
800
+ # Extra note to inject into the docstring
801
+ extra_note=None,
802
+ ):
803
+ """Decorator for a function that states its support among various
804
+ Array API compatible backends.
805
+
806
+ This decorator has two effects:
807
+ 1. It allows tagging tests with ``@make_xp_test_case`` or
808
+ ``make_xp_pytest_param`` (see below) to automatically generate
809
+ SKIP/XFAIL markers and perform additional backend-specific
810
+ testing, such as extra validation for Dask and JAX;
811
+ 2. It automatically adds a note to the function's docstring, containing
812
+ a table matching what has been tested.
813
+
814
+ See Also
815
+ --------
816
+ make_xp_test_case
817
+ make_xp_pytest_param
818
+ array_api_extra.testing.lazy_xp_function
819
+ """
820
+ capabilities_table = (xp_capabilities_table if capabilities_table is None
821
+ else capabilities_table)
822
+
823
+ if out_of_scope:
824
+ np_only = True
825
+
826
+ capabilities = dict(
827
+ skip_backends=skip_backends,
828
+ xfail_backends=xfail_backends,
829
+ cpu_only=cpu_only,
830
+ np_only=np_only,
831
+ out_of_scope=out_of_scope,
832
+ reason=reason,
833
+ exceptions=exceptions,
834
+ allow_dask_compute=allow_dask_compute,
835
+ jax_jit=jax_jit,
836
+ warnings=warnings,
837
+ )
838
+ sphinx_capabilities = _make_sphinx_capabilities(**capabilities)
839
+
840
+ def decorator(f):
841
+ # Don't use a wrapper, as in some cases @xp_capabilities is
842
+ # applied to a ufunc
843
+ capabilities_table[f] = capabilities
844
+ note = _make_capabilities_note(f.__name__, sphinx_capabilities, extra_note)
845
+ doc = FunctionDoc(f)
846
+ doc['Notes'].append(note)
847
+ doc = str(doc).split("\n", 1)[1].lstrip(" \n") # remove signature
848
+ try:
849
+ f.__doc__ = doc
850
+ except AttributeError:
851
+ # Can't update __doc__ on ufuncs if SciPy
852
+ # was compiled against NumPy < 2.2.
853
+ pass
854
+
855
+ return f
856
+ return decorator
857
+
858
+
859
+ def make_xp_test_case(*funcs, capabilities_table=None):
860
+ capabilities_table = (xp_capabilities_table if capabilities_table is None
861
+ else capabilities_table)
862
+ """Generate pytest decorator for a test function that tests functionality
863
+ of one or more Array API compatible functions.
864
+
865
+ Read the parameters of the ``@xp_capabilities`` decorator applied to the
866
+ listed functions and:
867
+
868
+ - Generate the ``@pytest.mark.skip_xp_backends`` and
869
+ ``@pytest.mark.xfail_xp_backends`` decorators
870
+ for the decorated test function
871
+ - Tag the function with `xpx.testing.lazy_xp_function`
872
+
873
+ Example::
874
+
875
+ @make_xp_test_case(f1)
876
+ def test_f1(xp):
877
+ ...
878
+
879
+ @make_xp_test_case(f2)
880
+ def test_f2(xp):
881
+ ...
882
+
883
+ @make_xp_test_case(f1, f2)
884
+ def test_f1_and_f2(xp):
885
+ ...
886
+
887
+ The above is equivalent to::
888
+ @pytest.mark.skip_xp_backends(...)
889
+ @pytest.mark.skip_xp_backends(...)
890
+ @pytest.mark.xfail_xp_backends(...)
891
+ @pytest.mark.xfail_xp_backends(...)
892
+ def test_f1(xp):
893
+ ...
894
+
895
+ etc., where the arguments of ``skip_xp_backends`` and ``xfail_xp_backends`` are
896
+ determined by the ``@xp_capabilities`` decorator applied to the functions.
897
+
898
+ See Also
899
+ --------
900
+ xp_capabilities
901
+ make_xp_pytest_marks
902
+ make_xp_pytest_param
903
+ array_api_extra.testing.lazy_xp_function
904
+ """
905
+ marks = make_xp_pytest_marks(*funcs, capabilities_table=capabilities_table)
906
+ return lambda func: functools.reduce(lambda f, g: g(f), marks, func)
907
+
908
+
909
+ def make_xp_pytest_param(func, *args, capabilities_table=None):
910
+ """Variant of ``make_xp_test_case`` that returns a pytest.param for a function,
911
+ with all necessary skip_xp_backends and xfail_xp_backends marks applied::
912
+
913
+ @pytest.mark.parametrize(
914
+ "func", [make_xp_pytest_param(f1), make_xp_pytest_param(f2)]
915
+ )
916
+ def test(func, xp):
917
+ ...
918
+
919
+ The above is equivalent to::
920
+
921
+ @pytest.mark.parametrize(
922
+ "func", [
923
+ pytest.param(f1, marks=[
924
+ pytest.mark.skip_xp_backends(...),
925
+ pytest.mark.xfail_xp_backends(...), ...]),
926
+ pytest.param(f2, marks=[
927
+ pytest.mark.skip_xp_backends(...),
928
+ pytest.mark.xfail_xp_backends(...), ...]),
929
+ )
930
+ def test(func, xp):
931
+ ...
932
+
933
+ Parameters
934
+ ----------
935
+ func : Callable
936
+ Function to be tested. It must be decorated with ``@xp_capabilities``.
937
+ *args : Any, optional
938
+ Extra pytest parameters for the use case, e.g.::
939
+
940
+ @pytest.mark.parametrize("func,verb", [
941
+ make_xp_pytest_param(f1, "hello"),
942
+ make_xp_pytest_param(f2, "world")])
943
+ def test(func, verb, xp):
944
+ # iterates on (func=f1, verb="hello")
945
+ # and (func=f2, verb="world")
946
+
947
+ See Also
948
+ --------
949
+ xp_capabilities
950
+ make_xp_test_case
951
+ make_xp_pytest_marks
952
+ array_api_extra.testing.lazy_xp_function
953
+ """
954
+ import pytest
955
+
956
+ marks = make_xp_pytest_marks(func, capabilities_table=capabilities_table)
957
+ return pytest.param(func, *args, marks=marks, id=func.__name__)
958
+
959
+
960
+ def make_xp_pytest_marks(*funcs, capabilities_table=None):
961
+ """Variant of ``make_xp_test_case`` that returns a list of pytest marks,
962
+ which can be used with the module-level `pytestmark = ...` variable::
963
+
964
+ pytestmark = make_xp_pytest_marks(f1, f2)
965
+
966
+ def test(xp):
967
+ ...
968
+
969
+ In this example, the whole test module is dedicated to testing `f1` or `f2`,
970
+ and the two functions have the same capabilities, so it's unnecessary to
971
+ cherry-pick which test tests which function.
972
+ The above is equivalent to::
973
+
974
+ pytestmark = [
975
+ pytest.mark.skip_xp_backends(...),
976
+ pytest.mark.xfail_xp_backends(...), ...]),
977
+ ]
978
+
979
+ def test(xp):
980
+ ...
981
+
982
+ See Also
983
+ --------
984
+ xp_capabilities
985
+ make_xp_test_case
986
+ make_xp_pytest_param
987
+ array_api_extra.testing.lazy_xp_function
988
+ """
989
+ capabilities_table = (xp_capabilities_table if capabilities_table is None
990
+ else capabilities_table)
991
+ import pytest
992
+
993
+ marks = []
994
+ for func in funcs:
995
+ capabilities = capabilities_table[func]
996
+ exceptions = capabilities['exceptions']
997
+ reason = capabilities['reason']
998
+
999
+ if capabilities['cpu_only']:
1000
+ marks.append(pytest.mark.skip_xp_backends(
1001
+ cpu_only=True, exceptions=exceptions, reason=reason))
1002
+ if capabilities['np_only']:
1003
+ marks.append(pytest.mark.skip_xp_backends(
1004
+ np_only=True, exceptions=exceptions, reason=reason))
1005
+
1006
+ for mod_name, reason in capabilities['skip_backends']:
1007
+ marks.append(pytest.mark.skip_xp_backends(mod_name, reason=reason))
1008
+ for mod_name, reason in capabilities['xfail_backends']:
1009
+ marks.append(pytest.mark.xfail_xp_backends(mod_name, reason=reason))
1010
+
1011
+ lazy_kwargs = {k: capabilities[k]
1012
+ for k in ('allow_dask_compute', 'jax_jit')}
1013
+ lazy_xp_function(func, **lazy_kwargs)
1014
+
1015
+ return marks
1016
+
1017
+
1018
+ # Is it OK to have a dictionary that is mutated (once upon import) in many places?
1019
+ xp_capabilities_table = {} # type: ignore[var-annotated]
1020
+
1021
+
1022
+ def xp_device_type(a: Array) -> Literal["cpu", "cuda", None]:
1023
+ if is_numpy_array(a):
1024
+ return "cpu"
1025
+ if is_cupy_array(a):
1026
+ return "cuda"
1027
+ if is_torch_array(a):
1028
+ # TODO this can return other backends e.g. tpu but they're unsupported in scipy
1029
+ return a.device.type
1030
+ if is_jax_array(a):
1031
+ # TODO this can return other backends e.g. tpu but they're unsupported in scipy
1032
+ return "cuda" if (p := a.device.platform) == "gpu" else p
1033
+ if is_dask_array(a):
1034
+ return xp_device_type(a._meta)
1035
+ # array-api-strict is a stand-in for unknown libraries; don't special-case it
1036
+ return None
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api_compat_vendor.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # DO NOT RENAME THIS FILE
2
+ # This is a hook for array_api_extra/src/array_api_extra/_lib/_compat.py
3
+ # to override functions of array_api_compat.
4
+
5
+ from .array_api_compat import * # noqa: F403
6
+ from ._array_api_override import array_namespace as scipy_array_namespace
7
+
8
+ # overrides array_api_compat.array_namespace inside array-api-extra
9
+ array_namespace = scipy_array_namespace # type: ignore[assignment]
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api_docs_tables.py ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Generate flat tables showing Array API capabilities for use in docs.
2
+
3
+ These tables are intended for presenting Array API capabilities across
4
+ a wide number of functions at once. Rows correspond to functions and
5
+ columns correspond to library/device/option combinations.
6
+ """
7
+
8
+ from collections import defaultdict
9
+ from enum import auto, Enum
10
+ from importlib import import_module
11
+ from types import ModuleType
12
+
13
+ from scipy._lib._array_api import xp_capabilities_table
14
+ from scipy._lib._array_api import _make_sphinx_capabilities
15
+
16
+ # For undocumented aliases of public functions which are kept around for
17
+ # backwards compatibility reasons. These should be excluded from the
18
+ # tables since they would be redundant. There are also no docs pages to
19
+ # link entries to.
20
+ ALIASES = {
21
+ "scipy.linalg": {
22
+ # Alias of scipy.linalg.solve_continuous_lyapunov
23
+ "solve_lyapunov",
24
+ },
25
+ "scipy.ndimage": {
26
+ # Alias of scipy.ndimage.sum_labels
27
+ "sum",
28
+ },
29
+ "scipy.special": {
30
+ # Alias of scipy.special.jv
31
+ "jn",
32
+ # Alias of scipy.special.roots_legendre
33
+ "p_roots",
34
+ # Alias of scipy.special.roots_chebyt
35
+ "t_roots",
36
+ # Alias of scipy.special.roots_chebyu
37
+ "u_roots",
38
+ # Alias of scipy.special.roots_chebyc
39
+ "c_roots",
40
+ # Alias of scipy.special.roots_chebys
41
+ "s_roots",
42
+ # Alias of scipy.special.roots_jacobi
43
+ "j_roots",
44
+ # Alias of scipy.special.roots_laguerre
45
+ "l_roots",
46
+ # Alias of scipy.special.roots_genlaguerre
47
+ "la_roots",
48
+ # Alias of scipy.special.roots_hermite
49
+ "h_roots",
50
+ # Alias of scipy.special.roots_hermitenorm
51
+ "he_roots",
52
+ # Alias of scipy.special.roots_gegenbauer
53
+ "cg_roots",
54
+ # Alias of scipy.special.roots_sh_legendre
55
+ "ps_roots",
56
+ # Alias of scipy.special.roots_sh_chebyt
57
+ "ts_roots",
58
+ # Alias of scipy.special.roots_chebyu
59
+ "us_roots",
60
+ # Alias of scipy.special.roots_sh_jacobi
61
+ "js_roots",
62
+ }
63
+ }
64
+
65
+ # Shortened names for use in table.
66
+ BACKEND_NAMES_MAP = {
67
+ "jax.numpy": "jax",
68
+ "dask.array": "dask",
69
+ }
70
+
71
+
72
+ class BackendSupportStatus(Enum):
73
+ YES = auto()
74
+ NO = auto()
75
+ OUT_OF_SCOPE = auto()
76
+ UNKNOWN = auto()
77
+
78
+
79
+ def _process_capabilities_table_entry(entry: dict | None) -> dict[str, dict[str, bool]]:
80
+ """Returns dict showing alternative backend support in easy to consume form.
81
+
82
+ Parameters
83
+ ----------
84
+ entry : Optional[dict]
85
+ A dict with the structure of the values of the dict
86
+ scipy._lib._array_api.xp_capabilities_table. If None, it is
87
+ assumped that no alternative backends are supported.
88
+ Default: None.
89
+
90
+ Returns
91
+ -------
92
+ dict[str, dict[str, bool]]
93
+ The output dict currently has keys "cpu", "gpu", "jit" and "lazy".
94
+ The value associated to each key is itself a dict. The keys of
95
+ the inner dicts correspond to backends, with bool values stating
96
+ whether or not the backend is supported with a given device or
97
+ mode. Inapplicable backends do not appear in the inner dicts
98
+ (e.g. since cupy is gpu-only, it does not appear in the inner
99
+ dict keyed on "cpu"). Only alternative backends to NumPy are
100
+ included since NumPY support should be guaranteed.
101
+
102
+ """
103
+ # This is a template for the output format. If more backends and
104
+ # backend options are added, it will need to be updated manually.
105
+ # Entries start as boolean, but upon returning, will take values
106
+ # from the BackendSupportStatus Enum.
107
+ output = {
108
+ "cpu": {"torch": False, "jax": False, "dask": False},
109
+ "gpu": {"cupy": False, "torch": False, "jax": False},
110
+ "jit": {"jax": False},
111
+ "lazy": {"dask": False},
112
+ }
113
+ S = BackendSupportStatus
114
+ if entry is None:
115
+ # If there is no entry, assume no alternative backends are supported.
116
+ # If the list of supported backends will grows, this hard-coded dict
117
+ # will need to be updated.
118
+ return {
119
+ outer_key: {inner_key: S.UNKNOWN for inner_key in outer_value}
120
+ for outer_key, outer_value in output.items()
121
+ }
122
+
123
+ if entry["out_of_scope"]:
124
+ # None is used to signify out-of-scope functions.
125
+ return {
126
+ outer_key: {inner_key: S.OUT_OF_SCOPE for inner_key in outer_value}
127
+ for outer_key, outer_value in output.items()
128
+ }
129
+
130
+ # For now, use _make_sphinx_capabilities because that's where
131
+ # the relevant logic for determining what is and isn't
132
+ # supported based on xp_capabilities_table entries lives.
133
+ # Perhaps this logic should be decoupled from sphinx.
134
+ for backend, capabilities in _make_sphinx_capabilities(**entry).items():
135
+ if backend in {"array_api_strict", "numpy"}:
136
+ continue
137
+ backend = BACKEND_NAMES_MAP.get(backend, backend)
138
+ cpu, gpu = capabilities.cpu, capabilities.gpu
139
+ if cpu is not None:
140
+ if backend not in output["cpu"]:
141
+ raise ValueError(
142
+ "Input capabilities table entry contains unhandled"
143
+ f" backend {backend} on cpu."
144
+ )
145
+ output["cpu"][backend] = cpu
146
+ if gpu is not None:
147
+ if backend not in output["gpu"]:
148
+ raise ValueError(
149
+ "Input capabilities table entry contains unhandled"
150
+ f" backend {backend} on gpu."
151
+ )
152
+ output["gpu"][backend] = gpu
153
+ if backend == "jax":
154
+ output["jit"]["jax"] = entry["jax_jit"] and output["cpu"]["jax"]
155
+ if backend == "dask.array":
156
+ support_lazy = not entry["allow_dask_compute"] and output["dask"]
157
+ output["lazy"]["dask"] = support_lazy
158
+ return {
159
+ outer_key: {
160
+ inner_key: S.YES if inner_value else S.NO
161
+ for inner_key, inner_value in outer_value.items()
162
+ }
163
+ for outer_key, outer_value in output.items()
164
+ }
165
+
166
+
167
+ def is_named_function_like_object(obj):
168
+ return (
169
+ not isinstance(obj, ModuleType | type)
170
+ and callable(obj) and hasattr(obj, "__name__")
171
+ )
172
+
173
+
174
+ def make_flat_capabilities_table(
175
+ modules: str | list[str],
176
+ backend_type: str,
177
+ /,
178
+ *,
179
+ capabilities_table: list[str] | None = None,
180
+ ) -> list[dict[str, str]]:
181
+ """Generate full table of array api capabilities across public functions.
182
+
183
+ Parameters
184
+ ----------
185
+ modules : str | list[str]
186
+ A string containing single SciPy module, (e.g `scipy.stats`, `scipy.fft`)
187
+ or a list of such strings.
188
+
189
+ backend_type : {'cpu', 'gpu', 'jit', 'lazy'}
190
+
191
+ capabilities_table : Optional[list[str]]
192
+ Table in the form of `scipy._lib._array_api.xp_capabilities_table`.
193
+ If None, uses `scipy._lib._array_api.xp_capabilities_table`.
194
+ Default: None.
195
+
196
+ Returns
197
+ -------
198
+ output : list[dict[str, str]]
199
+ `output` is a table in dict format
200
+ (keys corresponding to column names). The first column is "module".
201
+ The other columns correspond to supported backends for the given
202
+ `backend_type`, e.g. jax.numpy, torch, and dask on cpu.
203
+ numpy is excluded because it should always be supported.
204
+ See the helper function
205
+ `_process_capabilities_table_entry` above).
206
+
207
+ """
208
+ if backend_type not in {"cpu", "gpu", "jit", "lazy"}:
209
+ raise ValueError(f"Received unhandled backend type {backend_type}")
210
+
211
+ if isinstance(modules, str):
212
+ modules = [modules]
213
+
214
+ if capabilities_table is None:
215
+ capabilities_table = xp_capabilities_table
216
+
217
+ output = []
218
+
219
+ for module_name in modules:
220
+ module = import_module(module_name)
221
+ public_things = module.__all__
222
+ for name in public_things:
223
+ if name in ALIASES.get(module_name, {}):
224
+ # Skip undocumented aliases that are kept
225
+ # for backwards compatibility reasons.
226
+ continue
227
+ thing = getattr(module, name)
228
+ if not is_named_function_like_object(thing):
229
+ continue
230
+ entry = xp_capabilities_table.get(thing, None)
231
+ capabilities = _process_capabilities_table_entry(entry)[backend_type]
232
+ row = {"module": module_name}
233
+ row.update({"function": name})
234
+ row.update(capabilities)
235
+ output.append(row)
236
+ return output
237
+
238
+
239
+ def calculate_table_statistics(
240
+ flat_table: list[dict[str, str]]
241
+ ) -> dict[str, tuple[dict[str, str], bool]]:
242
+ """Get counts of what is supported per module.
243
+
244
+ Parameters
245
+ ----------
246
+ flat_table : list[dict[str, str]]
247
+ A table as returned by `make_flat_capabilities_table`
248
+
249
+ Returns
250
+ -------
251
+ dict[str, tuple[dict[str, str], bool]]
252
+ dict mapping module names to 2-tuples containing an inner dict and a
253
+ bool. The inner dicts have a key "total" along with keys for each
254
+ backend column of the supplied flat capabilities table. The value
255
+ corresponding to total is the total count of functions in the given
256
+ module, and the value associated to the other keys is the count of
257
+ functions that support that particular backend. The bool is False if
258
+ the calculation may be innacurate due to missing xp_capabilities
259
+ decorators, and True if all functions for that particular module have
260
+ been decorated with xp_capabilities.
261
+ """
262
+ if not flat_table:
263
+ return []
264
+
265
+ counter = defaultdict(lambda: defaultdict(int))
266
+
267
+ S = BackendSupportStatus
268
+ # Keep track of which modules have functions with missing xp_capabilities
269
+ # decorators so this information can be passed back to the caller.
270
+ missing_xp_capabilities = set()
271
+ for entry in flat_table:
272
+ entry = entry.copy()
273
+ entry.pop("function")
274
+ module = entry.pop("module")
275
+ current_counter = counter[module]
276
+
277
+ # By design, all backends and options must be considered out-of-scope
278
+ # if one is, so just pick an arbitrary entry here to test if function is
279
+ # in-scope.
280
+ if next(iter(entry.values())) != S.OUT_OF_SCOPE:
281
+ current_counter["total"] += 1
282
+ for key, value in entry.items():
283
+ # Functions missing xp_capabilities will be tabulated as
284
+ # unsupported, but may actually be supported. There is a
285
+ # note about this in the documentation and this function is
286
+ # set up to return information needed to put asterisks next
287
+ # to percentages impacted by missing xp_capabilities decorators.
288
+ current_counter[key] += 1 if value == S.YES else 0
289
+ if value == S.UNKNOWN:
290
+ missing_xp_capabilities.add(module)
291
+ return {
292
+ key: (dict(value), key not in missing_xp_capabilities)
293
+ for key, value in counter.items()
294
+ }
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api_no_0d.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Extra testing functions that forbid 0d-input, see #21044
3
+
4
+ While the xp_assert_* functions generally aim to follow the conventions of the
5
+ underlying `xp` library, NumPy in particular is inconsistent in its handling
6
+ of scalars vs. 0d-arrays, see https://github.com/numpy/numpy/issues/24897.
7
+
8
+ For example, this means that the following operations (as of v2.0.1) currently
9
+ return scalars, even though a 0d-array would often be more appropriate:
10
+
11
+ import numpy as np
12
+ np.array(0) * 2 # scalar, not 0d array
13
+ - np.array(0) # scalar, not 0d-array
14
+ np.sin(np.array(0)) # scalar, not 0d array
15
+ np.mean([1, 2, 3]) # scalar, not 0d array
16
+
17
+ Libraries like CuPy tend to return a 0d-array in scenarios like those above,
18
+ and even `xp.asarray(0)[()]` remains a 0d-array there. To deal with the reality
19
+ of the inconsistencies present in NumPy, as well as 20+ years of code on top,
20
+ the `xp_assert_*` functions here enforce consistency in the only way that
21
+ doesn't go against the tide, i.e. by forbidding 0d-arrays as the return type.
22
+
23
+ However, when scalars are not generally the expected NumPy return type,
24
+ it remains preferable to use the assert functions from
25
+ the `scipy._lib._array_api` module, which have less surprising behaviour.
26
+ """
27
+ from scipy._lib._array_api import array_namespace, is_numpy
28
+ from scipy._lib._array_api import (xp_assert_close as xp_assert_close_base,
29
+ xp_assert_equal as xp_assert_equal_base,
30
+ xp_assert_less as xp_assert_less_base)
31
+
32
+ __all__: list[str] = []
33
+
34
+
35
+ def _check_scalar(actual, desired, *, xp=None, **kwargs):
36
+ __tracebackhide__ = True # Hide traceback for py.test
37
+
38
+ if xp is None:
39
+ xp = array_namespace(actual)
40
+
41
+ # necessary to handle non-numpy scalars, e.g. bare `0.0` has no shape
42
+ desired = xp.asarray(desired)
43
+
44
+ # Only NumPy distinguishes between scalars and arrays;
45
+ # shape check in xp_assert_* is sufficient except for shape == ()
46
+ if not (is_numpy(xp) and desired.shape == ()):
47
+ return
48
+
49
+ _msg = ("Result is a NumPy 0d-array. Many SciPy functions intend to follow "
50
+ "the convention of many NumPy functions, returning a scalar when a "
51
+ "0d-array would be correct. The specialized `xp_assert_*` functions "
52
+ "in the `scipy._lib._array_api_no_0d` module err on the side of "
53
+ "caution and do not accept 0d-arrays by default. If the correct "
54
+ "result may legitimately be a 0d-array, pass `check_0d=True`, "
55
+ "or use the `xp_assert_*` functions from `scipy._lib._array_api`.")
56
+ assert xp.isscalar(actual), _msg
57
+
58
+
59
+ def xp_assert_equal(actual, desired, *, check_0d=False, **kwargs):
60
+ # in contrast to xp_assert_equal_base, this defaults to check_0d=False,
61
+ # but will do an extra check in that case, which forbids 0d-arrays for `actual`
62
+ __tracebackhide__ = True # Hide traceback for py.test
63
+
64
+ # array-ness (check_0d == True) is taken care of by the *_base functions
65
+ if not check_0d:
66
+ _check_scalar(actual, desired, **kwargs)
67
+ return xp_assert_equal_base(actual, desired, check_0d=check_0d, **kwargs)
68
+
69
+
70
+ def xp_assert_close(actual, desired, *, check_0d=False, **kwargs):
71
+ # as for xp_assert_equal
72
+ __tracebackhide__ = True
73
+
74
+ if not check_0d:
75
+ _check_scalar(actual, desired, **kwargs)
76
+ return xp_assert_close_base(actual, desired, check_0d=check_0d, **kwargs)
77
+
78
+
79
+ def xp_assert_less(actual, desired, *, check_0d=False, **kwargs):
80
+ # as for xp_assert_equal
81
+ __tracebackhide__ = True
82
+
83
+ if not check_0d:
84
+ _check_scalar(actual, desired, **kwargs)
85
+ return xp_assert_less_base(actual, desired, check_0d=check_0d, **kwargs)
86
+
87
+
88
+ def assert_array_almost_equal(actual, desired, decimal=6, *args, **kwds):
89
+ """Backwards compatible replacement. In new code, use xp_assert_close instead.
90
+ """
91
+ rtol, atol = 0, 1.5*10**(-decimal)
92
+ return xp_assert_close(actual, desired,
93
+ atol=atol, rtol=rtol, check_dtype=False, check_shape=False,
94
+ *args, **kwds)
95
+
96
+
97
+ def assert_almost_equal(actual, desired, decimal=7, *args, **kwds):
98
+ """Backwards compatible replacement. In new code, use xp_assert_close instead.
99
+ """
100
+ rtol, atol = 0, 1.5*10**(-decimal)
101
+ return xp_assert_close(actual, desired,
102
+ atol=atol, rtol=rtol, check_dtype=False, check_shape=False,
103
+ *args, **kwds)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_array_api_override.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Override functions from array_api_compat, for use by array-api-extra
3
+ and internally.
4
+
5
+ See also _array_api_compat_vendor.py
6
+ """
7
+ import enum
8
+ import os
9
+
10
+ from functools import lru_cache
11
+ from types import ModuleType
12
+ from typing import Any, TypeAlias
13
+
14
+ import numpy as np
15
+ import numpy.typing as npt
16
+
17
+ from scipy._lib import array_api_compat
18
+ import scipy._lib.array_api_compat.numpy as np_compat
19
+ from scipy._lib.array_api_compat import is_array_api_obj, is_jax_array
20
+ from scipy._lib._sparse import SparseABC
21
+
22
+
23
+ Array: TypeAlias = Any # To be changed to a Protocol later (see array-api#589)
24
+ ArrayLike: TypeAlias = Array | npt.ArrayLike
25
+
26
+ # To enable array API and strict array-like input validation
27
+ SCIPY_ARRAY_API: str | bool = os.environ.get("SCIPY_ARRAY_API", False)
28
+ # To control the default device - for use in the test suite only
29
+ SCIPY_DEVICE = os.environ.get("SCIPY_DEVICE", "cpu")
30
+
31
+
32
+ class _ArrayClsInfo(enum.Enum):
33
+ skip = 0
34
+ numpy = 1
35
+ array_like = 2
36
+ unknown = 3
37
+
38
+
39
+ @lru_cache(100)
40
+ def _validate_array_cls(cls: type) -> _ArrayClsInfo:
41
+ if issubclass(cls, (list, tuple)):
42
+ return _ArrayClsInfo.array_like
43
+
44
+ # this comes from `_util._asarray_validated`
45
+ if issubclass(cls, SparseABC):
46
+ msg = ('Sparse arrays/matrices are not supported by this function. '
47
+ 'Perhaps one of the `scipy.sparse.linalg` functions '
48
+ 'would work instead.')
49
+ raise ValueError(msg)
50
+
51
+ if issubclass(cls, np.ma.MaskedArray):
52
+ raise TypeError("Inputs of type `numpy.ma.MaskedArray` are not supported.")
53
+
54
+ if issubclass(cls, np.matrix):
55
+ raise TypeError("Inputs of type `numpy.matrix` are not supported.")
56
+
57
+ if issubclass(cls, (np.ndarray, np.generic)):
58
+ return _ArrayClsInfo.numpy
59
+
60
+ # Note: this must happen after the test for np.generic, because
61
+ # np.float64 and np.complex128 are subclasses of float and complex respectively.
62
+ # This matches the behavior of array_api_compat.
63
+ if issubclass(cls, (int, float, complex, bool, type(None))):
64
+ return _ArrayClsInfo.skip
65
+
66
+ return _ArrayClsInfo.unknown
67
+
68
+
69
+ def array_namespace(*arrays: Array) -> ModuleType:
70
+ """Get the array API compatible namespace for the arrays xs.
71
+
72
+ Parameters
73
+ ----------
74
+ *arrays : sequence of array_like
75
+ Arrays used to infer the common namespace.
76
+
77
+ Returns
78
+ -------
79
+ namespace : module
80
+ Common namespace.
81
+
82
+ Notes
83
+ -----
84
+ Wrapper around `array_api_compat.array_namespace`.
85
+
86
+ 1. Check for the global switch `SCIPY_ARRAY_API`. If disabled, just
87
+ return array_api_compat.numpy namespace and skip all compliance checks.
88
+
89
+ 2. Check for known-bad array classes.
90
+ The following subclasses are not supported and raise and error:
91
+
92
+ - `numpy.ma.MaskedArray`
93
+ - `numpy.matrix`
94
+ - NumPy arrays which do not have a boolean or numerical dtype
95
+ - `scipy.sparse` arrays
96
+
97
+ 3. Coerce array-likes to NumPy arrays and check their dtype.
98
+ Note that non-scalar array-likes can't be mixed with non-NumPy Array
99
+ API objects; e.g.
100
+
101
+ - `array_namespace([1, 2])` returns NumPy namespace;
102
+ - `array_namespace(np.asarray([1, 2], [3, 4])` returns NumPy namespace;
103
+ - `array_namespace(cp.asarray([1, 2], [3, 4])` raises an error.
104
+ """
105
+ if not SCIPY_ARRAY_API:
106
+ # here we could wrap the namespace if needed
107
+ return np_compat
108
+
109
+ numpy_arrays = []
110
+ api_arrays = []
111
+
112
+ for array in arrays:
113
+ arr_info = _validate_array_cls(type(array))
114
+ if arr_info is _ArrayClsInfo.skip:
115
+ pass
116
+
117
+ elif arr_info is _ArrayClsInfo.numpy:
118
+ if array.dtype.kind in 'iufcb': # Numeric or bool
119
+ numpy_arrays.append(array)
120
+ elif array.dtype.kind == 'V' and is_jax_array(array):
121
+ # Special case for JAX zero gradient arrays;
122
+ # see array_api_compat._common._helpers._is_jax_zero_gradient_array
123
+ api_arrays.append(array) # JAX zero gradient array
124
+ else:
125
+ raise TypeError(f"An argument has dtype `{array.dtype!r}`; "
126
+ "only boolean and numerical dtypes are supported.")
127
+
128
+ elif arr_info is _ArrayClsInfo.unknown and is_array_api_obj(array):
129
+ api_arrays.append(array)
130
+
131
+ else:
132
+ # list, tuple, or arbitrary object
133
+ try:
134
+ array = np.asanyarray(array)
135
+ except TypeError:
136
+ raise TypeError("An argument is neither array API compatible nor "
137
+ "coercible by NumPy.")
138
+ if array.dtype.kind not in 'iufcb': # Numeric or bool
139
+ raise TypeError(f"An argument has dtype `{array.dtype!r}`; "
140
+ "only boolean and numerical dtypes are supported.")
141
+ numpy_arrays.append(array)
142
+
143
+ # When there are exclusively NumPy and ArrayLikes, skip calling
144
+ # array_api_compat.array_namespace for performance.
145
+ if not api_arrays:
146
+ return np_compat
147
+
148
+ # In case of mix of NumPy/ArrayLike and non-NumPy Array API arrays,
149
+ # let array_api_compat.array_namespace raise an error.
150
+ return array_api_compat.array_namespace(*numpy_arrays, *api_arrays)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_bunch.py ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys as _sys
2
+ from keyword import iskeyword as _iskeyword
3
+
4
+
5
+ def _validate_names(typename, field_names, extra_field_names):
6
+ """
7
+ Ensure that all the given names are valid Python identifiers that
8
+ do not start with '_'. Also check that there are no duplicates
9
+ among field_names + extra_field_names.
10
+ """
11
+ for name in [typename] + field_names + extra_field_names:
12
+ if not isinstance(name, str):
13
+ raise TypeError('typename and all field names must be strings')
14
+ if not name.isidentifier():
15
+ raise ValueError('typename and all field names must be valid '
16
+ f'identifiers: {name!r}')
17
+ if _iskeyword(name):
18
+ raise ValueError('typename and all field names cannot be a '
19
+ f'keyword: {name!r}')
20
+
21
+ seen = set()
22
+ for name in field_names + extra_field_names:
23
+ if name.startswith('_'):
24
+ raise ValueError('Field names cannot start with an underscore: '
25
+ f'{name!r}')
26
+ if name in seen:
27
+ raise ValueError(f'Duplicate field name: {name!r}')
28
+ seen.add(name)
29
+
30
+
31
+ # Note: This code is adapted from CPython:Lib/collections/__init__.py
32
+ def _make_tuple_bunch(typename, field_names, extra_field_names=None,
33
+ module=None):
34
+ """
35
+ Create a namedtuple-like class with additional attributes.
36
+
37
+ This function creates a subclass of tuple that acts like a namedtuple
38
+ and that has additional attributes.
39
+
40
+ The additional attributes are listed in `extra_field_names`. The
41
+ values assigned to these attributes are not part of the tuple.
42
+
43
+ The reason this function exists is to allow functions in SciPy
44
+ that currently return a tuple or a namedtuple to returned objects
45
+ that have additional attributes, while maintaining backwards
46
+ compatibility.
47
+
48
+ This should only be used to enhance *existing* functions in SciPy.
49
+ New functions are free to create objects as return values without
50
+ having to maintain backwards compatibility with an old tuple or
51
+ namedtuple return value.
52
+
53
+ Parameters
54
+ ----------
55
+ typename : str
56
+ The name of the type.
57
+ field_names : list of str
58
+ List of names of the values to be stored in the tuple. These names
59
+ will also be attributes of instances, so the values in the tuple
60
+ can be accessed by indexing or as attributes. At least one name
61
+ is required. See the Notes for additional restrictions.
62
+ extra_field_names : list of str, optional
63
+ List of names of values that will be stored as attributes of the
64
+ object. See the notes for additional restrictions.
65
+
66
+ Returns
67
+ -------
68
+ cls : type
69
+ The new class.
70
+
71
+ Notes
72
+ -----
73
+ There are restrictions on the names that may be used in `field_names`
74
+ and `extra_field_names`:
75
+
76
+ * The names must be unique--no duplicates allowed.
77
+ * The names must be valid Python identifiers, and must not begin with
78
+ an underscore.
79
+ * The names must not be Python keywords (e.g. 'def', 'and', etc., are
80
+ not allowed).
81
+
82
+ Examples
83
+ --------
84
+ >>> from scipy._lib._bunch import _make_tuple_bunch
85
+
86
+ Create a class that acts like a namedtuple with length 2 (with field
87
+ names `x` and `y`) that will also have the attributes `w` and `beta`:
88
+
89
+ >>> Result = _make_tuple_bunch('Result', ['x', 'y'], ['w', 'beta'])
90
+
91
+ `Result` is the new class. We call it with keyword arguments to create
92
+ a new instance with given values.
93
+
94
+ >>> result1 = Result(x=1, y=2, w=99, beta=0.5)
95
+ >>> result1
96
+ Result(x=1, y=2, w=99, beta=0.5)
97
+
98
+ `result1` acts like a tuple of length 2:
99
+
100
+ >>> len(result1)
101
+ 2
102
+ >>> result1[:]
103
+ (1, 2)
104
+
105
+ The values assigned when the instance was created are available as
106
+ attributes:
107
+
108
+ >>> result1.y
109
+ 2
110
+ >>> result1.beta
111
+ 0.5
112
+ """
113
+ if len(field_names) == 0:
114
+ raise ValueError('field_names must contain at least one name')
115
+
116
+ if extra_field_names is None:
117
+ extra_field_names = []
118
+ _validate_names(typename, field_names, extra_field_names)
119
+
120
+ typename = _sys.intern(str(typename))
121
+ field_names = tuple(map(_sys.intern, field_names))
122
+ extra_field_names = tuple(map(_sys.intern, extra_field_names))
123
+
124
+ all_names = field_names + extra_field_names
125
+ arg_list = ', '.join(field_names)
126
+ full_list = ', '.join(all_names)
127
+ repr_fmt = ''.join(('(',
128
+ ', '.join(f'{name}=%({name})r' for name in all_names),
129
+ ')'))
130
+ tuple_new = tuple.__new__
131
+ _dict, _tuple, _zip = dict, tuple, zip
132
+
133
+ # Create all the named tuple methods to be added to the class namespace
134
+
135
+ s = f"""\
136
+ def __new__(_cls, {arg_list}, **extra_fields):
137
+ return _tuple_new(_cls, ({arg_list},))
138
+
139
+ def __init__(self, {arg_list}, **extra_fields):
140
+ for key in self._extra_fields:
141
+ if key not in extra_fields:
142
+ raise TypeError("missing keyword argument '%s'" % (key,))
143
+ for key, val in extra_fields.items():
144
+ if key not in self._extra_fields:
145
+ raise TypeError("unexpected keyword argument '%s'" % (key,))
146
+ self.__dict__[key] = val
147
+
148
+ def __setattr__(self, key, val):
149
+ if key in {repr(field_names)}:
150
+ raise AttributeError("can't set attribute %r of class %r"
151
+ % (key, self.__class__.__name__))
152
+ else:
153
+ self.__dict__[key] = val
154
+ """
155
+ del arg_list
156
+ namespace = {'_tuple_new': tuple_new,
157
+ '__builtins__': dict(TypeError=TypeError,
158
+ AttributeError=AttributeError),
159
+ '__name__': f'namedtuple_{typename}'}
160
+ exec(s, namespace)
161
+ __new__ = namespace['__new__']
162
+ __new__.__doc__ = f'Create new instance of {typename}({full_list})'
163
+ __init__ = namespace['__init__']
164
+ __init__.__doc__ = f'Instantiate instance of {typename}({full_list})'
165
+ __setattr__ = namespace['__setattr__']
166
+
167
+ def __repr__(self):
168
+ 'Return a nicely formatted representation string'
169
+ return self.__class__.__name__ + repr_fmt % self._asdict()
170
+
171
+ def _asdict(self):
172
+ 'Return a new dict which maps field names to their values.'
173
+ out = _dict(_zip(self._fields, self))
174
+ out.update(self.__dict__)
175
+ return out
176
+
177
+ def __getnewargs_ex__(self):
178
+ 'Return self as a plain tuple. Used by copy and pickle.'
179
+ return _tuple(self), self.__dict__
180
+
181
+ # Modify function metadata to help with introspection and debugging
182
+ for method in (__new__, __repr__, _asdict, __getnewargs_ex__):
183
+ method.__qualname__ = f'{typename}.{method.__name__}'
184
+
185
+ # Build-up the class namespace dictionary
186
+ # and use type() to build the result class
187
+ class_namespace = {
188
+ '__doc__': f'{typename}({full_list})',
189
+ '_fields': field_names,
190
+ '__new__': __new__,
191
+ '__init__': __init__,
192
+ '__repr__': __repr__,
193
+ '__setattr__': __setattr__,
194
+ '_asdict': _asdict,
195
+ '_extra_fields': extra_field_names,
196
+ '__getnewargs_ex__': __getnewargs_ex__,
197
+ # _field_defaults and _replace are added to get Polars to detect
198
+ # a bunch object as a namedtuple. See gh-22450
199
+ '_field_defaults': {},
200
+ '_replace': None,
201
+ }
202
+ for index, name in enumerate(field_names):
203
+
204
+ def _get(self, index=index):
205
+ return self[index]
206
+ class_namespace[name] = property(_get)
207
+ for name in extra_field_names:
208
+
209
+ def _get(self, name=name):
210
+ return self.__dict__[name]
211
+ class_namespace[name] = property(_get)
212
+
213
+ result = type(typename, (tuple,), class_namespace)
214
+
215
+ # For pickling to work, the __module__ variable needs to be set to the
216
+ # frame where the named tuple is created. Bypass this step in environments
217
+ # where sys._getframe is not defined (Jython for example) or sys._getframe
218
+ # is not defined for arguments greater than 0 (IronPython), or where the
219
+ # user has specified a particular module.
220
+ if module is None:
221
+ try:
222
+ module = _sys._getframe(1).f_globals.get('__name__', '__main__')
223
+ except (AttributeError, ValueError):
224
+ pass
225
+ if module is not None:
226
+ result.__module__ = module
227
+ __new__.__module__ = module
228
+
229
+ return result
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_ccallback.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from . import _ccallback_c
2
+
3
+ import ctypes
4
+
5
+ PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0]
6
+
7
+ ffi = None
8
+
9
+ class CData:
10
+ pass
11
+
12
+ def _import_cffi():
13
+ global ffi, CData
14
+
15
+ if ffi is not None:
16
+ return
17
+
18
+ try:
19
+ import cffi
20
+ ffi = cffi.FFI()
21
+ CData = ffi.CData
22
+ except ImportError:
23
+ ffi = False
24
+
25
+
26
+ class LowLevelCallable(tuple):
27
+ """
28
+ Low-level callback function.
29
+
30
+ Some functions in SciPy take as arguments callback functions, which
31
+ can either be python callables or low-level compiled functions. Using
32
+ compiled callback functions can improve performance somewhat by
33
+ avoiding wrapping data in Python objects.
34
+
35
+ Such low-level functions in SciPy are wrapped in `LowLevelCallable`
36
+ objects, which can be constructed from function pointers obtained from
37
+ ctypes, cffi, Cython, or contained in Python `PyCapsule` objects.
38
+
39
+ .. seealso::
40
+
41
+ Functions accepting low-level callables:
42
+
43
+ `scipy.integrate.quad`, `scipy.ndimage.generic_filter`,
44
+ `scipy.ndimage.generic_filter1d`, `scipy.ndimage.geometric_transform`
45
+
46
+ Usage examples:
47
+
48
+ :ref:`ndimage-ccallbacks`, :ref:`quad-callbacks`
49
+
50
+ Parameters
51
+ ----------
52
+ function : {PyCapsule, ctypes function pointer, cffi function pointer}
53
+ Low-level callback function.
54
+ user_data : {PyCapsule, ctypes void pointer, cffi void pointer}
55
+ User data to pass on to the callback function.
56
+ signature : str, optional
57
+ Signature of the function. If omitted, determined from *function*,
58
+ if possible.
59
+
60
+ Attributes
61
+ ----------
62
+ function
63
+ Callback function given.
64
+ user_data
65
+ User data given.
66
+ signature
67
+ Signature of the function.
68
+
69
+ Methods
70
+ -------
71
+ from_cython
72
+ Class method for constructing callables from Cython C-exported
73
+ functions.
74
+
75
+ Notes
76
+ -----
77
+ The argument ``function`` can be one of:
78
+
79
+ - PyCapsule, whose name contains the C function signature
80
+ - ctypes function pointer
81
+ - cffi function pointer
82
+
83
+ The signature of the low-level callback must match one of those expected
84
+ by the routine it is passed to.
85
+
86
+ If constructing low-level functions from a PyCapsule, the name of the
87
+ capsule must be the corresponding signature, in the format::
88
+
89
+ return_type (arg1_type, arg2_type, ...)
90
+
91
+ For example::
92
+
93
+ "void (double)"
94
+ "double (double, int *, void *)"
95
+
96
+ The context of a PyCapsule passed in as ``function`` is used as ``user_data``,
97
+ if an explicit value for ``user_data`` was not given.
98
+
99
+ """
100
+
101
+ # Make the class immutable
102
+ __slots__ = ()
103
+
104
+ def __new__(cls, function, user_data=None, signature=None):
105
+ # We need to hold a reference to the function & user data,
106
+ # to prevent them going out of scope
107
+ item = cls._parse_callback(function, user_data, signature)
108
+ return tuple.__new__(cls, (item, function, user_data))
109
+
110
+ def __repr__(self):
111
+ return f"LowLevelCallable({self.function!r}, {self.user_data!r})"
112
+
113
+ @property
114
+ def function(self):
115
+ return tuple.__getitem__(self, 1)
116
+
117
+ @property
118
+ def user_data(self):
119
+ return tuple.__getitem__(self, 2)
120
+
121
+ @property
122
+ def signature(self):
123
+ return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0))
124
+
125
+ def __getitem__(self, idx):
126
+ raise ValueError()
127
+
128
+ @classmethod
129
+ def from_cython(cls, module, name, user_data=None, signature=None):
130
+ """
131
+ Create a low-level callback function from an exported Cython function.
132
+
133
+ Parameters
134
+ ----------
135
+ module : module
136
+ Cython module where the exported function resides
137
+ name : str
138
+ Name of the exported function
139
+ user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional
140
+ User data to pass on to the callback function.
141
+ signature : str, optional
142
+ Signature of the function. If omitted, determined from *function*.
143
+
144
+ """
145
+ try:
146
+ function = module.__pyx_capi__[name]
147
+ except AttributeError as e:
148
+ message = "Given module is not a Cython module with __pyx_capi__ attribute"
149
+ raise ValueError(message) from e
150
+ except KeyError as e:
151
+ message = f"No function {name!r} found in __pyx_capi__ of the module"
152
+ raise ValueError(message) from e
153
+ return cls(function, user_data, signature)
154
+
155
+ @classmethod
156
+ def _parse_callback(cls, obj, user_data=None, signature=None):
157
+ _import_cffi()
158
+
159
+ if isinstance(obj, LowLevelCallable):
160
+ func = tuple.__getitem__(obj, 0)
161
+ elif isinstance(obj, PyCFuncPtr):
162
+ func, signature = _get_ctypes_func(obj, signature)
163
+ elif isinstance(obj, CData):
164
+ func, signature = _get_cffi_func(obj, signature)
165
+ elif _ccallback_c.check_capsule(obj):
166
+ func = obj
167
+ else:
168
+ raise ValueError("Given input is not a callable or a "
169
+ "low-level callable (pycapsule/ctypes/cffi)")
170
+
171
+ if isinstance(user_data, ctypes.c_void_p):
172
+ context = _get_ctypes_data(user_data)
173
+ elif isinstance(user_data, CData):
174
+ context = _get_cffi_data(user_data)
175
+ elif user_data is None:
176
+ context = 0
177
+ elif _ccallback_c.check_capsule(user_data):
178
+ context = user_data
179
+ else:
180
+ raise ValueError("Given user data is not a valid "
181
+ "low-level void* pointer (pycapsule/ctypes/cffi)")
182
+
183
+ return _ccallback_c.get_raw_capsule(func, signature, context)
184
+
185
+
186
+ #
187
+ # ctypes helpers
188
+ #
189
+
190
+ def _get_ctypes_func(func, signature=None):
191
+ # Get function pointer
192
+ func_ptr = ctypes.cast(func, ctypes.c_void_p).value
193
+
194
+ # Construct function signature
195
+ if signature is None:
196
+ signature = _typename_from_ctypes(func.restype) + " ("
197
+ for j, arg in enumerate(func.argtypes):
198
+ if j == 0:
199
+ signature += _typename_from_ctypes(arg)
200
+ else:
201
+ signature += ", " + _typename_from_ctypes(arg)
202
+ signature += ")"
203
+
204
+ return func_ptr, signature
205
+
206
+
207
+ def _typename_from_ctypes(item):
208
+ if item is None:
209
+ return "void"
210
+ elif item is ctypes.c_void_p:
211
+ return "void *"
212
+
213
+ name = item.__name__
214
+
215
+ pointer_level = 0
216
+ while name.startswith("LP_"):
217
+ pointer_level += 1
218
+ name = name[3:]
219
+
220
+ if name.startswith('c_'):
221
+ name = name[2:]
222
+
223
+ if pointer_level > 0:
224
+ name += " " + "*"*pointer_level
225
+
226
+ return name
227
+
228
+
229
+ def _get_ctypes_data(data):
230
+ # Get voidp pointer
231
+ return ctypes.cast(data, ctypes.c_void_p).value
232
+
233
+
234
+ #
235
+ # CFFI helpers
236
+ #
237
+
238
+ def _get_cffi_func(func, signature=None):
239
+ # Get function pointer
240
+ func_ptr = ffi.cast('uintptr_t', func)
241
+
242
+ # Get signature
243
+ if signature is None:
244
+ signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ')
245
+
246
+ return func_ptr, signature
247
+
248
+
249
+ def _get_cffi_data(data):
250
+ # Get pointer
251
+ return ffi.cast('uintptr_t', data)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_ccallback_c.cpython-312-x86_64-linux-gnu.so ADDED
Binary file (95.5 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_disjoint_set.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Disjoint set data structure
3
+ """
4
+
5
+
6
+ class DisjointSet:
7
+ """ Disjoint set data structure for incremental connectivity queries.
8
+
9
+ .. versionadded:: 1.6.0
10
+
11
+ Attributes
12
+ ----------
13
+ n_subsets : int
14
+ The number of subsets.
15
+
16
+ Methods
17
+ -------
18
+ add
19
+ merge
20
+ connected
21
+ subset
22
+ subset_size
23
+ subsets
24
+ __getitem__
25
+
26
+ Notes
27
+ -----
28
+ This class implements the disjoint set [1]_, also known as the *union-find*
29
+ or *merge-find* data structure. The *find* operation (implemented in
30
+ `__getitem__`) implements the *path halving* variant. The *merge* method
31
+ implements the *merge by size* variant.
32
+
33
+ References
34
+ ----------
35
+ .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure
36
+
37
+ Examples
38
+ --------
39
+ >>> from scipy.cluster.hierarchy import DisjointSet
40
+
41
+ Initialize a disjoint set:
42
+
43
+ >>> disjoint_set = DisjointSet([1, 2, 3, 'a', 'b'])
44
+
45
+ Merge some subsets:
46
+
47
+ >>> disjoint_set.merge(1, 2)
48
+ True
49
+ >>> disjoint_set.merge(3, 'a')
50
+ True
51
+ >>> disjoint_set.merge('a', 'b')
52
+ True
53
+ >>> disjoint_set.merge('b', 'b')
54
+ False
55
+
56
+ Find root elements:
57
+
58
+ >>> disjoint_set[2]
59
+ 1
60
+ >>> disjoint_set['b']
61
+ 3
62
+
63
+ Test connectivity:
64
+
65
+ >>> disjoint_set.connected(1, 2)
66
+ True
67
+ >>> disjoint_set.connected(1, 'b')
68
+ False
69
+
70
+ List elements in disjoint set:
71
+
72
+ >>> list(disjoint_set)
73
+ [1, 2, 3, 'a', 'b']
74
+
75
+ Get the subset containing 'a':
76
+
77
+ >>> disjoint_set.subset('a')
78
+ {'a', 3, 'b'}
79
+
80
+ Get the size of the subset containing 'a' (without actually instantiating
81
+ the subset):
82
+
83
+ >>> disjoint_set.subset_size('a')
84
+ 3
85
+
86
+ Get all subsets in the disjoint set:
87
+
88
+ >>> disjoint_set.subsets()
89
+ [{1, 2}, {'a', 3, 'b'}]
90
+ """
91
+ def __init__(self, elements=None):
92
+ self.n_subsets = 0
93
+ self._sizes = {}
94
+ self._parents = {}
95
+ # _nbrs is a circular linked list which links connected elements.
96
+ self._nbrs = {}
97
+ # _indices tracks the element insertion order in `__iter__`.
98
+ self._indices = {}
99
+ if elements is not None:
100
+ for x in elements:
101
+ self.add(x)
102
+
103
+ def __iter__(self):
104
+ """Returns an iterator of the elements in the disjoint set.
105
+
106
+ Elements are ordered by insertion order.
107
+ """
108
+ return iter(self._indices)
109
+
110
+ def __len__(self):
111
+ return len(self._indices)
112
+
113
+ def __contains__(self, x):
114
+ return x in self._indices
115
+
116
+ def __getitem__(self, x):
117
+ """Find the root element of `x`.
118
+
119
+ Parameters
120
+ ----------
121
+ x : hashable object
122
+ Input element.
123
+
124
+ Returns
125
+ -------
126
+ root : hashable object
127
+ Root element of `x`.
128
+ """
129
+ if x not in self._indices:
130
+ raise KeyError(x)
131
+
132
+ # find by "path halving"
133
+ parents = self._parents
134
+ while self._indices[x] != self._indices[parents[x]]:
135
+ parents[x] = parents[parents[x]]
136
+ x = parents[x]
137
+ return x
138
+
139
+ def add(self, x):
140
+ """Add element `x` to disjoint set
141
+ """
142
+ if x in self._indices:
143
+ return
144
+
145
+ self._sizes[x] = 1
146
+ self._parents[x] = x
147
+ self._nbrs[x] = x
148
+ self._indices[x] = len(self._indices)
149
+ self.n_subsets += 1
150
+
151
+ def merge(self, x, y):
152
+ """Merge the subsets of `x` and `y`.
153
+
154
+ The smaller subset (the child) is merged into the larger subset (the
155
+ parent). If the subsets are of equal size, the root element which was
156
+ first inserted into the disjoint set is selected as the parent.
157
+
158
+ Parameters
159
+ ----------
160
+ x, y : hashable object
161
+ Elements to merge.
162
+
163
+ Returns
164
+ -------
165
+ merged : bool
166
+ True if `x` and `y` were in disjoint sets, False otherwise.
167
+ """
168
+ xr = self[x]
169
+ yr = self[y]
170
+ if self._indices[xr] == self._indices[yr]:
171
+ return False
172
+
173
+ sizes = self._sizes
174
+ if (sizes[xr], self._indices[yr]) < (sizes[yr], self._indices[xr]):
175
+ xr, yr = yr, xr
176
+ self._parents[yr] = xr
177
+ self._sizes[xr] += self._sizes[yr]
178
+ self._nbrs[xr], self._nbrs[yr] = self._nbrs[yr], self._nbrs[xr]
179
+ self.n_subsets -= 1
180
+ return True
181
+
182
+ def connected(self, x, y):
183
+ """Test whether `x` and `y` are in the same subset.
184
+
185
+ Parameters
186
+ ----------
187
+ x, y : hashable object
188
+ Elements to test.
189
+
190
+ Returns
191
+ -------
192
+ result : bool
193
+ True if `x` and `y` are in the same set, False otherwise.
194
+ """
195
+ return self._indices[self[x]] == self._indices[self[y]]
196
+
197
+ def subset(self, x):
198
+ """Get the subset containing `x`.
199
+
200
+ Parameters
201
+ ----------
202
+ x : hashable object
203
+ Input element.
204
+
205
+ Returns
206
+ -------
207
+ result : set
208
+ Subset containing `x`.
209
+ """
210
+ if x not in self._indices:
211
+ raise KeyError(x)
212
+
213
+ result = [x]
214
+ nxt = self._nbrs[x]
215
+ while self._indices[nxt] != self._indices[x]:
216
+ result.append(nxt)
217
+ nxt = self._nbrs[nxt]
218
+ return set(result)
219
+
220
+ def subset_size(self, x):
221
+ """Get the size of the subset containing `x`.
222
+
223
+ Note that this method is faster than ``len(self.subset(x))`` because
224
+ the size is directly read off an internal field, without the need to
225
+ instantiate the full subset.
226
+
227
+ Parameters
228
+ ----------
229
+ x : hashable object
230
+ Input element.
231
+
232
+ Returns
233
+ -------
234
+ result : int
235
+ Size of the subset containing `x`.
236
+ """
237
+ return self._sizes[self[x]]
238
+
239
+ def subsets(self):
240
+ """Get all the subsets in the disjoint set.
241
+
242
+ Returns
243
+ -------
244
+ result : list
245
+ Subsets in the disjoint set.
246
+ """
247
+ result = []
248
+ visited = set()
249
+ for x in self:
250
+ if x not in visited:
251
+ xset = self.subset(x)
252
+ visited.update(xset)
253
+ result.append(xset)
254
+ return result
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_docscrape.py ADDED
@@ -0,0 +1,761 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # copied from numpydoc/docscrape.py, commit 97a6026508e0dd5382865672e9563a72cc113bd2
2
+ """Extract reference documentation from the NumPy source tree."""
3
+
4
+ import copy
5
+ import inspect
6
+ import pydoc
7
+ import re
8
+ import sys
9
+ import textwrap
10
+ from collections import namedtuple
11
+ from collections.abc import Callable, Mapping
12
+ from functools import cached_property
13
+ from warnings import warn
14
+
15
+
16
+ def strip_blank_lines(l):
17
+ "Remove leading and trailing blank lines from a list of lines"
18
+ while l and not l[0].strip():
19
+ del l[0]
20
+ while l and not l[-1].strip():
21
+ del l[-1]
22
+ return l
23
+
24
+
25
class Reader:
    """A line-based string reader with a movable cursor."""

    def __init__(self, data):
        """
        Parameters
        ----------
        data : str
            String with lines separated by '\\n'.

        """
        # Normalize to a list of lines; a pre-split list is stored as-is.
        self._str = data if isinstance(data, list) else data.split("\n")
        self.reset()

    def __getitem__(self, n):
        return self._str[n]

    def reset(self):
        # Rewind the cursor to the first line.
        self._l = 0

    def read(self):
        """Return the current line and advance the cursor; '' at EOF."""
        if self.eof():
            return ""
        current = self._str[self._l]
        self._l += 1
        return current

    def seek_next_non_empty_line(self):
        """Advance the cursor past consecutive blank lines."""
        for candidate in self._str[self._l:]:
            if candidate.strip():
                break
            self._l += 1

    def eof(self):
        """True when the cursor has moved past the last line."""
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        """Consume and return lines until ``condition_func(line)`` is true."""
        start = self._l
        for candidate in self._str[start:]:
            if condition_func(candidate):
                return self._str[start:self._l]
            self._l += 1
            if self.eof():
                # Everything was consumed; the +1 slice bound is harmless
                # past the end of the list.
                return self._str[start:self._l + 1]
        return []

    def read_to_next_empty_line(self):
        """Skip leading blanks, then read lines up to the next blank line."""
        self.seek_next_non_empty_line()

        def is_empty(line):
            return not line.strip()

        return self.read_to_condition(is_empty)

    def read_to_next_unindented_line(self):
        """Read lines until one that is non-blank and not indented."""
        def is_unindented(line):
            return line.strip() and len(line.lstrip()) == len(line)

        return self.read_to_condition(is_unindented)

    def peek(self, n=0):
        """Return the line `n` past the cursor without moving it; '' past EOF."""
        idx = self._l + n
        return self._str[idx] if idx < len(self._str) else ""

    def is_empty(self):
        """True when the whole input is blank."""
        return not "".join(self._str).strip()
99
+
100
+
101
class ParseError(Exception):
    """Raised when a docstring cannot be parsed.

    If a ``docstring`` attribute has been attached (see
    ``NumpyDocString.__init__``), it is included in the rendered message.
    """

    def __str__(self):
        text = self.args[0]
        if hasattr(self, "docstring"):
            text = f"{text} in {self.docstring!r}"
        return text
107
+
108
+
109
# Structured record for one documented parameter: ``name`` and ``type`` are
# strings, ``desc`` is a list of description lines (see _parse_param_list).
Parameter = namedtuple("Parameter", ["name", "type", "desc"])
110
+
111
+
112
class NumpyDocString(Mapping):
    """Parses a numpydoc string to an abstract representation

    Instances define a mapping from section title to structured data.

    """

    # Default (empty) value for every recognized section; the type of each
    # value doubles as the schema of the parsed content for that section.
    sections = {
        "Signature": "",
        "Summary": [""],
        "Extended Summary": [],
        "Parameters": [],
        "Attributes": [],
        "Methods": [],
        "Returns": [],
        "Yields": [],
        "Receives": [],
        "Other Parameters": [],
        "Raises": [],
        "Warns": [],
        "Warnings": [],
        "See Also": [],
        "Notes": [],
        "References": "",
        "Examples": "",
        "index": {},
    }

    def __init__(self, docstring, config=None):
        # `config` is accepted for interface compatibility; it is unused here.
        # Keep the raw text so a ParseError can report what failed to parse.
        orig_docstring = docstring
        docstring = textwrap.dedent(docstring).split("\n")

        self._doc = Reader(docstring)
        # Deep copy so instances never share the mutable section defaults.
        self._parsed_data = copy.deepcopy(self.sections)

        try:
            self._parse()
        except ParseError as e:
            e.docstring = orig_docstring
            raise

    def __getitem__(self, key):
        return self._parsed_data[key]

    def __setitem__(self, key, val):
        # Unknown sections are reported (as a warning), not stored.
        if key not in self._parsed_data:
            self._error_location(f"Unknown section {key}", error=False)
        else:
            self._parsed_data[key] = val

    def __iter__(self):
        return iter(self._parsed_data)

    def __len__(self):
        return len(self._parsed_data)

    def _is_at_section(self):
        """Return True if the reader is positioned at a section header."""
        self._doc.seek_next_non_empty_line()

        if self._doc.eof():
            return False

        l1 = self._doc.peek().strip()  # e.g. Parameters

        if l1.startswith(".. index::"):
            return True

        l2 = self._doc.peek(1).strip()  # ---------- or ==========
        # Warn (don't fail) when an underline exists but has the wrong length.
        if len(l2) >= 3 and (set(l2) in ({"-"}, {"="})) and len(l2) != len(l1):
            snip = "\n".join(self._doc._str[:2]) + "..."
            self._error_location(
                f"potentially wrong underline length... \n{l1} \n{l2} in \n{snip}",
                error=False,
            )
        return l2.startswith("-" * len(l1)) or l2.startswith("=" * len(l1))

    def _strip(self, doc):
        # Trim leading and trailing blank lines (list-of-lines variant of
        # strip_blank_lines, but without mutating the input).
        i = 0
        j = 0
        for i, line in enumerate(doc):
            if line.strip():
                break

        for j, line in enumerate(doc[::-1]):
            if line.strip():
                break

        return doc[i : len(doc) - j]

    def _read_to_next_section(self):
        # Collect lines (including internal blank lines) until the next
        # section header or EOF.
        section = self._doc.read_to_next_empty_line()

        while not self._is_at_section() and not self._doc.eof():
            if not self._doc.peek(-1).strip():  # previous line was empty
                section += [""]

            section += self._doc.read_to_next_empty_line()

        return section

    def _read_sections(self):
        # Yield (name, content) pairs for each section of the docstring.
        while not self._doc.eof():
            data = self._read_to_next_section()
            name = data[0].strip()

            if name.startswith(".."):  # index section
                yield name, data[1:]
            elif len(data) < 2:
                # Degenerate section: yields the StopIteration class itself
                # as a sentinel (consumed by `_parse`'s unpacking loop).
                yield StopIteration
            else:
                # Skip the header and its underline (data[0] and data[1]).
                yield name, self._strip(data[2:])

    def _parse_param_list(self, content, single_element_is_type=False):
        """Parse a parameter-list section into a list of `Parameter` tuples.

        When `single_element_is_type` is True (Returns/Yields/... sections),
        a header with no " : " separator is treated as a type rather than
        a name.
        """
        content = dedent_lines(content)
        r = Reader(content)
        params = []
        while not r.eof():
            header = r.read().strip()
            if " : " in header:
                arg_name, arg_type = header.split(" : ", maxsplit=1)
            else:
                # NOTE: param line with single element should never have a
                # a " :" before the description line, so this should probably
                # warn.
                if header.endswith(" :"):
                    header = header[:-2]
                if single_element_is_type:
                    arg_name, arg_type = "", header
                else:
                    arg_name, arg_type = header, ""

            # The description is the indented block under the header.
            desc = r.read_to_next_unindented_line()
            desc = dedent_lines(desc)
            desc = strip_blank_lines(desc)

            params.append(Parameter(arg_name, arg_type, desc))

        return params

    # See also supports the following formats.
    #
    # <FUNCNAME>
    # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*
    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE*
    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*

    # <FUNCNAME> is one of
    #   <PLAIN_FUNCNAME>
    #   COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK
    # where
    #   <PLAIN_FUNCNAME> is a legal function name, and
    #   <ROLE> is any nonempty sequence of word characters.
    # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`
    # <DESC> is a string describing the function.

    _role = r":(?P<role>(py:)?\w+):"
    _funcbacktick = r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`"
    _funcplain = r"(?P<name2>[a-zA-Z0-9_\.-]+)"
    _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")"
    # Second occurrence of the pattern needs distinct group names, hence
    # the "role" -> "rolenext", "name" -> "namenext" renames.
    _funcnamenext = _funcname.replace("role", "rolenext")
    _funcnamenext = _funcnamenext.replace("name", "namenext")
    _description = r"(?P<description>\s*:(\s+(?P<desc>\S+.*))?)?\s*$"
    _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*")
    _line_rgx = re.compile(
        r"^\s*"
        + r"(?P<allfuncs>"
        + _funcname  # group for all function names
        + r"(?P<morefuncs>([,]\s+"
        + _funcnamenext
        + r")*)"
        + r")"
        + r"(?P<trailing>[,\.])?"  # end of "allfuncs"
        + _description  # Some function lists have a trailing comma (or period) '\s*'
    )

    # Empty <DESC> elements are replaced with '..'
    empty_description = ".."

    def _parse_see_also(self, content):
        """
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, :meth:`func_name`, func_name3

        """

        content = dedent_lines(content)

        items = []

        def parse_item_name(text):
            """Match ':role:`name`' or 'name'."""
            m = self._func_rgx.match(text)
            if not m:
                self._error_location(f"Error parsing See Also entry {line!r}")
            role = m.group("role")
            name = m.group("name") if role else m.group("name2")
            return name, role, m.end()

        rest = []
        for line in content:
            if not line.strip():
                continue

            line_match = self._line_rgx.match(line)
            description = None
            if line_match:
                description = line_match.group("desc")
                if line_match.group("trailing") and description:
                    self._error_location(
                        "Unexpected comma or period after function list at index %d of "
                        'line "%s"' % (line_match.end("trailing"), line),
                        error=False,
                    )
            if not description and line.startswith(" "):
                # Indented continuation of the previous entry's description.
                rest.append(line.strip())
            elif line_match:
                funcs = []
                text = line_match.group("allfuncs")
                while True:
                    if not text.strip():
                        break
                    name, role, match_end = parse_item_name(text)
                    funcs.append((name, role))
                    text = text[match_end:].strip()
                    if text and text[0] == ",":
                        text = text[1:].strip()
                rest = list(filter(None, [description]))
                items.append((funcs, rest))
            else:
                self._error_location(f"Error parsing See Also entry {line!r}")
        return items

    def _parse_index(self, section, content):
        """
        .. index:: default
           :refguide: something, else, and more

        """

        def strip_each_in(lst):
            return [s.strip() for s in lst]

        out = {}
        section = section.split("::")
        if len(section) > 1:
            out["default"] = strip_each_in(section[1].split(","))[0]
        for line in content:
            line = line.split(":")
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(","))
        return out

    def _parse_summary(self):
        """Grab signature (if given) and summary"""
        if self._is_at_section():
            return

        # If several signatures present, take the last one
        while True:
            summary = self._doc.read_to_next_empty_line()
            summary_str = " ".join([s.strip() for s in summary]).strip()
            compiled = re.compile(r"^([\w., ]+=)?\s*[\w\.]+\(.*\)$")
            if compiled.match(summary_str):
                self["Signature"] = summary_str
                if not self._is_at_section():
                    continue
            break

        if summary is not None:
            self["Summary"] = summary

        if not self._is_at_section():
            self["Extended Summary"] = self._read_to_next_section()

    def _parse(self):
        # Full parse: summary first, then each named section.
        self._doc.reset()
        self._parse_summary()

        sections = list(self._read_sections())
        section_names = {section for section, content in sections}

        has_yields = "Yields" in section_names
        # We could do more tests, but we are not. Arbitrarily.
        if not has_yields and "Receives" in section_names:
            msg = "Docstring contains a Receives section but not Yields."
            raise ValueError(msg)

        for section, content in sections:
            if not section.startswith(".."):
                # Normalize capitalization ("parameters" -> "Parameters").
                section = (s.capitalize() for s in section.split(" "))
                section = " ".join(section)
                if self.get(section):
                    self._error_location(
                        "The section %s appears twice in %s"
                        % (section, "\n".join(self._doc._str))
                    )

            if section in ("Parameters", "Other Parameters", "Attributes", "Methods"):
                self[section] = self._parse_param_list(content)
            elif section in ("Returns", "Yields", "Raises", "Warns", "Receives"):
                self[section] = self._parse_param_list(
                    content, single_element_is_type=True
                )
            elif section.startswith(".. index::"):
                self["index"] = self._parse_index(section, content)
            elif section == "See Also":
                self["See Also"] = self._parse_see_also(content)
            else:
                self[section] = content

    @property
    def _obj(self):
        # The object the docstring belongs to: a class (`_cls`, set by
        # ClassDoc) or a function/object (`_f`, set by FunctionDoc/ObjDoc).
        if hasattr(self, "_cls"):
            return self._cls
        elif hasattr(self, "_f"):
            return self._f
        return None

    def _error_location(self, msg, error=True):
        """Raise ValueError (error=True) or warn (error=False) with context."""
        if self._obj is not None:
            # we know where the docs came from:
            try:
                filename = inspect.getsourcefile(self._obj)
            except TypeError:
                filename = None
            # Make UserWarning more descriptive via object introspection.
            # Skip if introspection fails
            name = getattr(self._obj, "__name__", None)
            if name is None:
                name = getattr(getattr(self._obj, "__class__", None), "__name__", None)
            if name is not None:
                msg += f" in the docstring of {name}"
            # NOTE(review): the f-string below has no placeholder, so the
            # literal text "(unknown)" is appended whenever a filename was
            # found; presumably `{filename}` was intended — verify against
            # upstream before relying on the message text.
            msg += f" in (unknown)." if filename else ""
        if error:
            raise ValueError(msg)
        else:
            warn(msg, stacklevel=3)

    # string conversion routines

    def _str_header(self, name, symbol="-"):
        # Section title plus a matching-length underline.
        return [name, len(name) * symbol]

    def _str_indent(self, doc, indent=4):
        return [" " * indent + line for line in doc]

    def _str_signature(self):
        if self["Signature"]:
            # Escape '*' so reST does not treat it as emphasis markup.
            return [self["Signature"].replace("*", r"\*")] + [""]
        return [""]

    def _str_summary(self):
        if self["Summary"]:
            return self["Summary"] + [""]
        return []

    def _str_extended_summary(self):
        if self["Extended Summary"]:
            return self["Extended Summary"] + [""]
        return []

    def _str_param_list(self, name):
        # Render a Parameter-list section back to numpydoc text.
        out = []
        if self[name]:
            out += self._str_header(name)
            for param in self[name]:
                parts = []
                if param.name:
                    parts.append(param.name)
                if param.type:
                    parts.append(param.type)
                out += [" : ".join(parts)]
                if param.desc and "".join(param.desc).strip():
                    out += self._str_indent(param.desc)
                out += [""]
        return out

    def _str_section(self, name):
        # Render a free-text section (e.g. Notes, Examples).
        out = []
        if self[name]:
            out += self._str_header(name)
            out += self[name]
            out += [""]
        return out

    def _str_see_also(self, func_role):
        if not self["See Also"]:
            return []
        out = []
        out += self._str_header("See Also")
        out += [""]
        last_had_desc = True
        for funcs, desc in self["See Also"]:
            assert isinstance(funcs, list)
            links = []
            for func, role in funcs:
                # Prefer the entry's own role, then the caller-supplied
                # default role, then a plain reST reference.
                if role:
                    link = f":{role}:`{func}`"
                elif func_role:
                    link = f":{func_role}:`{func}`"
                else:
                    link = f"`{func}`_"
                links.append(link)
            link = ", ".join(links)
            out += [link]
            if desc:
                out += self._str_indent([" ".join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
                out += self._str_indent([self.empty_description])

        if last_had_desc:
            out += [""]
        out += [""]
        return out

    def _str_index(self):
        idx = self["index"]
        out = []
        output_index = False
        default_index = idx.get("default", "")
        if default_index:
            output_index = True
        out += [f".. index:: {default_index}"]
        for section, references in idx.items():
            if section == "default":
                continue
            output_index = True
            out += [f"   :{section}: {', '.join(references)}"]
        if output_index:
            return out
        return ""

    def __str__(self, func_role=""):
        # NOTE: nonstandard __str__ signature — `func_role` is threaded
        # through by FunctionDoc.__str__ below.
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        out += self._str_param_list("Parameters")
        for param_list in ("Attributes", "Methods"):
            out += self._str_param_list(param_list)
        for param_list in (
            "Returns",
            "Yields",
            "Receives",
            "Other Parameters",
            "Raises",
            "Warns",
        ):
            out += self._str_param_list(param_list)
        out += self._str_section("Warnings")
        out += self._str_see_also(func_role)
        for s in ("Notes", "References", "Examples"):
            out += self._str_section(s)
        out += self._str_index()
        return "\n".join(out)
571
+
572
+
573
def dedent_lines(lines):
    """Deindent a list of lines maximally.

    The lines are joined, the common leading whitespace is removed with
    `textwrap.dedent`, and the result is split back into a list of lines.
    """
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
576
+
577
+
578
class FunctionDoc(NumpyDocString):
    """Parsed numpydoc docstring of a function or method."""

    def __init__(self, func, role="func", doc=None, config=None):
        self._f = func
        self._role = role  # e.g. "func" or "meth"

        if doc is None:
            if func is None:
                raise ValueError("No function or docstring given")
            # Fall back to the callable's own docstring.
            doc = inspect.getdoc(func) or ""
        if config is None:
            config = {}
        NumpyDocString.__init__(self, doc, config)

    def get_func(self):
        """Return the underlying callable and its display name."""
        func_name = getattr(self._f, "__name__", self.__class__.__name__)
        if inspect.isclass(self._f):
            # For classes, document __call__ (falling back to __init__).
            func = getattr(self._f, "__call__", self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        out = ""

        func, func_name = self.get_func()

        # Mapping from role abbreviation to the reST directive name.
        roles = {"func": "function", "meth": "method"}

        if self._role:
            if self._role not in roles:
                # Unknown roles are reported on stdout, not raised.
                print(f"Warning: invalid role {self._role}")
            out += f".. {roles.get(self._role, '')}:: {func_name}\n \n\n"

        # Delegate section rendering to the base class, passing the role
        # through to _str_see_also.
        out += super().__str__(func_role=self._role)
        return out
613
+
614
+
615
class ObjDoc(NumpyDocString):
    """Parsed docstring attached to an arbitrary object."""

    def __init__(self, obj, doc=None, config=None):
        # Stored so NumpyDocString._obj can report the documented object.
        self._f = obj
        NumpyDocString.__init__(self, doc, config={} if config is None else config)
621
+
622
+
623
class ClassDoc(NumpyDocString):
    """Parsed numpydoc docstring of a class, including member introspection."""

    # Dunder methods that are still considered part of the public API.
    extra_public_methods = ["__call__"]

    def __init__(self, cls, doc=None, modulename="", func_doc=FunctionDoc, config=None):
        if not inspect.isclass(cls) and cls is not None:
            raise ValueError(f"Expected a class or None, but got {cls!r}")
        self._cls = cls

        # Use Sphinx's ALL sentinel when Sphinx is loaded; otherwise any
        # fresh object serves as an equivalent never-equal sentinel.
        if "sphinx" in sys.modules:
            from sphinx.ext.autodoc import ALL
        else:
            ALL = object()

        if config is None:
            config = {}
        self.show_inherited_members = config.get("show_inherited_class_members", True)

        if modulename and not modulename.endswith("."):
            modulename += "."
        self._mod = modulename

        if doc is None:
            if cls is None:
                raise ValueError("No class or documentation string given")
            doc = pydoc.getdoc(cls)

        NumpyDocString.__init__(self, doc)

        _members = config.get("members", [])
        if _members is ALL:
            _members = None
        _exclude = config.get("exclude-members", [])

        if config.get("show_class_members", True) and _exclude is not ALL:

            def splitlines_x(s):
                # splitlines that maps falsy input to an empty list.
                if not s:
                    return []
                else:
                    return s.splitlines()

            # Auto-populate Methods/Attributes sections from introspection
            # when the docstring itself did not provide them.
            for field, items in [
                ("Methods", self.methods),
                ("Attributes", self.properties),
            ]:
                if not self[field]:
                    doc_list = []
                    for name in sorted(items):
                        if name in _exclude or (_members and name not in _members):
                            continue
                        try:
                            doc_item = pydoc.getdoc(getattr(self._cls, name))
                            doc_list.append(Parameter(name, "", splitlines_x(doc_item)))
                        except AttributeError:
                            pass  # method doesn't exist
                    self[field] = doc_list

    @property
    def methods(self):
        """Names of public (or whitelisted dunder) callables of the class."""
        if self._cls is None:
            return []
        return [
            name
            for name, func in inspect.getmembers(self._cls)
            if (
                (not name.startswith("_") or name in self.extra_public_methods)
                and isinstance(func, Callable)
                and self._is_show_member(name)
            )
        ]

    @property
    def properties(self):
        """Names of public properties / data descriptors of the class."""
        if self._cls is None:
            return []
        return [
            name
            for name, func in inspect.getmembers(self._cls)
            if (
                not name.startswith("_")
                and not self._should_skip_member(name, self._cls)
                and (
                    func is None
                    # PEP 604 isinstance union — requires Python 3.10+.
                    or isinstance(func, property | cached_property)
                    or inspect.isdatadescriptor(func)
                )
                and self._is_show_member(name)
            )
        ]

    @staticmethod
    def _should_skip_member(name, klass):
        return (
            # Namedtuples should skip everything in their ._fields as the
            # docstrings for each of the members is: "Alias for field number X"
            issubclass(klass, tuple)
            and hasattr(klass, "_asdict")
            and hasattr(klass, "_fields")
            and name in klass._fields
        )

    def _is_show_member(self, name):
        return (
            # show all class members
            self.show_inherited_members
            # or class member is not inherited
            or name in self._cls.__dict__
        )
731
+
732
+
733
def get_doc_object(
    obj,
    what=None,
    doc=None,
    config=None,
    class_doc=ClassDoc,
    func_doc=FunctionDoc,
    obj_doc=ObjDoc,
):
    """Return the parsed-docstring wrapper appropriate for `obj`.

    When `what` is not given it is inferred from `obj`: 'class', 'module',
    'function' (any callable), or 'object'. The ``*_doc`` parameters let
    callers substitute their own wrapper classes.
    """
    if what is None:
        what = (
            "class" if inspect.isclass(obj)
            else "module" if inspect.ismodule(obj)
            else "function" if isinstance(obj, Callable)
            else "object"
        )
    config = {} if config is None else config

    if what == "class":
        return class_doc(obj, func_doc=func_doc, doc=doc, config=config)
    if what in ("function", "method"):
        return func_doc(obj, doc=doc, config=config)
    doc = pydoc.getdoc(obj) if doc is None else doc
    return obj_doc(obj, doc, config=config)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_elementwise_iterative_method.py ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # `_elementwise_iterative_method.py` includes tools for writing functions that
2
+ # - are vectorized to work elementwise on arrays,
3
+ # - implement non-trivial, iterative algorithms with a callback interface, and
4
+ # - return rich objects with iteration count, termination status, etc.
5
+ #
6
+ # Examples include:
7
+ # `scipy.optimize._chandrupatla._chandrupatla for scalar rootfinding,
8
+ # `scipy.optimize._chandrupatla._chandrupatla_minimize for scalar minimization,
9
+ # `scipy.optimize._differentiate._differentiate for numerical differentiation,
10
+ # `scipy.optimize._bracket._bracket_root for finding rootfinding brackets,
11
+ # `scipy.optimize._bracket._bracket_minimize for finding minimization brackets,
12
+ # `scipy.integrate._tanhsinh._tanhsinh` for numerical quadrature,
13
+ # `scipy.differentiate.derivative` for finite difference based differentiation.
14
+
15
+ import math
16
+ import numpy as np
17
+ from ._util import _RichResult, _call_callback_maybe_halt
18
+ from ._array_api import array_namespace, xp_size, xp_result_type
19
+ import scipy._lib.array_api_extra as xpx
20
+
21
# Termination status codes used by the elementwise iterative framework.
# Negative values indicate failures, 0 indicates successful convergence,
# and 1 marks elements still being iterated (the initial status in `_loop`).
_ESIGNERR = -1    # presumably a sign-condition failure (set by callers) — verify
_ECONVERR = -2    # did not converge within `maxiter` (assigned in `_loop`)
_EVALUEERR = -3   # presumably an invalid value was encountered (set by callers)
_ECALLBACK = -4   # the user callback requested termination (see `_loop`)
_EINPUTERR = -5   # presumably invalid input (set by callers) — verify
_ECONVERGED = 0   # termination condition satisfied
_EINPROGRESS = 1  # element is still being iterated
28
+
29
def _initialize(func, xs, args, complex_ok=False, preserve_shape=None, xp=None):
    """Initialize abscissa, function, and args arrays for elementwise function

    Parameters
    ----------
    func : callable
        An elementwise function with signature

            func(x: ndarray, *args) -> ndarray

        where each element of ``x`` is a finite real and ``args`` is a tuple,
        which may contain an arbitrary number of arrays that are broadcastable
        with ``x``.
    xs : tuple of arrays
        Finite real abscissa arrays. Must be broadcastable.
    args : tuple, optional
        Additional positional arguments to be passed to `func`.
    complex_ok : bool, default: False
        Whether to accept a complex result dtype; when False, a non-real
        result dtype raises ``ValueError``.
    preserve_shape : bool, default:False
        When ``preserve_shape=False`` (default), `func` may be passed
        arguments of any shape; `_scalar_optimization_loop` is permitted
        to reshape and compress arguments at will. When
        ``preserve_shape=True``, arguments passed to `func` must have shape
        `shape` or ``shape + (n,)``, where ``n`` is any integer.
    xp : namespace
        Namespace of array arguments in `xs`. Determined from `xs` via
        ``array_namespace`` when not provided.

    Returns
    -------
    func : callable
        Possibly-wrapped version of the input callable (wrapped when
        `preserve_shape` is truthy).
    xs, fs, args : tuple of arrays
        Broadcasted, writeable, 1D abscissa and function value arrays (or
        NumPy floats, if appropriate). The dtypes of the `xs` and `fs` are
        `xfat`; the dtype of the `args` are unchanged.
    shape : tuple of ints
        Original shape of broadcasted arrays.
    xfat : NumPy dtype
        Result dtype of abscissae, function values, and args determined using
        `np.result_type`, except integer types are promoted to `np.float64`.
    xp : namespace
        The array namespace used for all operations above.

    Raises
    ------
    ValueError
        If the result dtype is not that of a real scalar

    Notes
    -----
    Useful for initializing the input of SciPy functions that accept
    an elementwise callable, abscissae, and arguments; e.g.
    `scipy.optimize._chandrupatla`.
    """
    nx = len(xs)
    xp = array_namespace(*xs) if xp is None else xp

    # Try to preserve `dtype`, but we need to ensure that the arguments are at
    # least floats before passing them into the function; integers can overflow
    # and cause failure.
    # There might be benefit to combining the `xs` into a single array and
    # calling `func` once on the combined array. For now, keep them separate.
    xat = xp_result_type(*xs, force_floating=True, xp=xp)
    xas = xp.broadcast_arrays(*xs, *args)  # broadcast and rename
    xs, args = xas[:nx], xas[nx:]
    xs = [xp.asarray(x, dtype=xat) for x in xs]  # use copy=False when implemented
    fs = [xp.asarray(func(x, *args)) for x in xs]
    shape = xs[0].shape
    fshape = fs[0].shape

    if preserve_shape:
        # bind original shape/func now to avoid late-binding gotcha
        def func(x, *args, shape=shape, func=func, **kwargs):
            # index away leading singleton dimensions added by broadcasting
            i = (0,)*(len(fshape) - len(shape))
            return func(x[i], *args, **kwargs)
        shape = np.broadcast_shapes(fshape, shape)  # just shapes; use of NumPy OK
        xs = [xp.broadcast_to(x, shape) for x in xs]
        args = [xp.broadcast_to(arg, shape) for arg in args]

    message = ("The shape of the array returned by `func` must be the same as "
               "the broadcasted shape of `x` and all other `args`.")
    if preserve_shape is not None:  # only in tanhsinh for now
        message = f"When `preserve_shape=False`, {message.lower()}"
    shapes_equal = [f.shape == shape for f in fs]
    if not all(shapes_equal):  # use Python all to reduce overhead
        raise ValueError(message)

    # These algorithms tend to mix the dtypes of the abscissae and function
    # values, so figure out what the result will be and convert them all to
    # that type from the outset.
    xfat = xp.result_type(*([f.dtype for f in fs] + [xat]))
    if not complex_ok and not xp.isdtype(xfat, "real floating"):
        raise ValueError("Abscissae and function output must be real numbers.")
    xs = [xp.asarray(x, dtype=xfat, copy=True) for x in xs]
    fs = [xp.asarray(f, dtype=xfat, copy=True) for f in fs]

    # To ensure that we can do indexing, we'll work with at least 1d arrays,
    # but remember the appropriate shape of the output.
    xs = [xp.reshape(x, (-1,)) for x in xs]
    fs = [xp.reshape(f, (-1,)) for f in fs]
    args = [xp.reshape(xp.asarray(arg, copy=True), (-1,)) for arg in args]
    return func, xs, fs, args, shape, xfat, xp
126
+
127
+
128
def _loop(work, callback, shape, maxiter, func, args, dtype, pre_func_eval,
          post_func_eval, check_termination, post_termination_check,
          customize_result, res_work_pairs, xp, preserve_shape=False):
    """Main loop of a vectorized scalar optimization algorithm

    Parameters
    ----------
    work : _RichResult
        All variables that need to be retained between iterations. Must
        contain attributes `nit`, `nfev`, and `success`. All arrays are
        subject to being "compressed" if `preserve_shape is False`; nest
        arrays that should not be compressed inside another object (e.g.
        `dict` or `_RichResult`).
    callback : callable
        User-specified callback function
    shape : tuple of ints
        The shape of all output arrays
    maxiter : int
        Maximum number of iterations of the algorithm
    func : callable
        The user-specified callable that is being optimized or solved
    args : tuple
        Additional positional arguments to be passed to `func`.
    dtype : NumPy dtype
        The common dtype of all abscissae and function values
    pre_func_eval : callable
        A function that accepts `work` and returns `x`, the active elements
        of `x` at which `func` will be evaluated. May modify attributes
        of `work` with any algorithmic steps that need to happen
        at the beginning of an iteration, before `func` is evaluated,
    post_func_eval : callable
        A function that accepts `x`, `func(x)`, and `work`. May modify
        attributes of `work` with any algorithmic steps that need to happen
        in the middle of an iteration, after `func` is evaluated but before
        the termination check.
    check_termination : callable
        A function that accepts `work` and returns `stop`, a boolean array
        indicating which of the active elements have met a termination
        condition.
    post_termination_check : callable
        A function that accepts `work`. May modify `work` with any algorithmic
        steps that need to happen after the termination check and before the
        end of the iteration.
    customize_result : callable
        A function that accepts `res` and `shape` and returns `shape`. May
        modify `res` (in-place) according to preferences (e.g. rearrange
        elements between attributes) and modify `shape` if needed.
    res_work_pairs : list of (str, str)
        Identifies correspondence between attributes of `res` and attributes
        of `work`; i.e., attributes of active elements of `work` will be
        copied to the appropriate indices of `res` when appropriate. The order
        determines the order in which _RichResult attributes will be
        pretty-printed.
    xp : namespace
        Array namespace used throughout; must be provided
        (``NotImplementedError`` is raised when it is None).
    preserve_shape : bool, default: False
        When False (the default), attributes of `work` are compressed after
        each termination check (elements that have terminated are dropped)
        to avoid unnecessary computation; when True, arrays retain their
        full shape throughout.

    Returns
    -------
    res : _RichResult
        The final result object

    Notes
    -----
    Besides providing structure, this framework provides several important
    services for a vectorized optimization algorithm.

    - It handles common tasks involving iteration count, function evaluation
      count, a user-specified callback, and associated termination conditions.
    - It compresses the attributes of `work` to eliminate unnecessary
      computation on elements that have already converged.

    """
    if xp is None:
        raise NotImplementedError("Must provide xp.")

    cb_terminate = False

    # Initialize the result object and active element index array
    n_elements = math.prod(shape)
    active = xp.arange(n_elements)  # in-progress element indices
    # One flat result array per `res` attribute named in `res_work_pairs`.
    res_dict = {i: xp.zeros(n_elements, dtype=dtype) for i, j in res_work_pairs}
    res_dict['success'] = xp.zeros(n_elements, dtype=xp.bool)
    res_dict['status'] = xp.full(n_elements, xp.asarray(_EINPROGRESS), dtype=xp.int32)
    res_dict['nit'] = xp.zeros(n_elements, dtype=xp.int32)
    res_dict['nfev'] = xp.zeros(n_elements, dtype=xp.int32)
    res = _RichResult(res_dict)
    work.args = args

    # Some elements may already satisfy a termination condition before the
    # first iteration; record and deactivate them now.
    active = _check_termination(work, res, res_work_pairs, active,
                                check_termination, preserve_shape, xp)

    if callback is not None:
        temp = _prepare_result(work, res, res_work_pairs, active, shape,
                               customize_result, preserve_shape, xp)
        if _call_callback_maybe_halt(callback, temp):
            cb_terminate = True

    while work.nit < maxiter and xp_size(active) and not cb_terminate and n_elements:
        x = pre_func_eval(work)

        if work.args and work.args[0].ndim != x.ndim:
            # `x` always starts as 1D. If the SciPy function that uses
            # _loop added dimensions to `x`, we need to
            # add them to the elements of `args`.
            args = []
            for arg in work.args:
                n_new_dims = x.ndim - arg.ndim
                new_shape = arg.shape + (1,)*n_new_dims
                args.append(xp.reshape(arg, new_shape))
            work.args = args

        x_shape = x.shape
        if preserve_shape:
            # present `func` with arrays of the original shape (plus a
            # trailing axis), then restore the working shape afterwards
            x = xp.reshape(x, (shape + (-1,)))
        f = func(x, *work.args)
        f = xp.asarray(f, dtype=dtype)
        if preserve_shape:
            x = xp.reshape(x, x_shape)
            f = xp.reshape(f, x_shape)
        # one evaluation per element when 1-D; otherwise one per trailing-axis
        # column
        work.nfev += 1 if x.ndim == 1 else x.shape[-1]

        post_func_eval(x, f, work)

        work.nit += 1
        active = _check_termination(work, res, res_work_pairs, active,
                                    check_termination, preserve_shape, xp)

        if callback is not None:
            temp = _prepare_result(work, res, res_work_pairs, active, shape,
                                   customize_result, preserve_shape, xp)
            if _call_callback_maybe_halt(callback, temp):
                cb_terminate = True
                break
        if xp_size(active) == 0:
            break

        post_termination_check(work)

    # Any elements still marked in-progress at this point either hit the
    # callback stop or failed to converge within `maxiter`.
    work.status = xpx.at(work.status)[:].set(_ECALLBACK if cb_terminate else _ECONVERR)
    return _prepare_result(work, res, res_work_pairs, active, shape,
                           customize_result, preserve_shape, xp)
+ customize_result, preserve_shape, xp)
270
+
271
+
272
+ def _check_termination(work, res, res_work_pairs, active, check_termination,
273
+ preserve_shape, xp):
274
+ # Checks termination conditions, updates elements of `res` with
275
+ # corresponding elements of `work`, and compresses `work`.
276
+
277
+ stop = check_termination(work)
278
+
279
+ if xp.any(stop):
280
+ # update the active elements of the result object with the active
281
+ # elements for which a termination condition has been met
282
+ _update_active(work, res, res_work_pairs, active, stop, preserve_shape, xp)
283
+
284
+ if preserve_shape:
285
+ stop = stop[active]
286
+
287
+ proceed = ~stop
288
+ active = active[proceed]
289
+
290
+ if not preserve_shape:
291
+ # compress the arrays to avoid unnecessary computation
292
+ for key, val in work.items():
293
+ # `continued_fraction` hacks `n`; improve if this becomes a problem
294
+ if key in {'args', 'n'}:
295
+ continue
296
+ work[key] = val[proceed] if getattr(val, 'ndim', 0) > 0 else val
297
+ work.args = [arg[proceed] for arg in work.args]
298
+
299
+ return active
300
+
301
+
302
+ def _update_active(work, res, res_work_pairs, active, mask, preserve_shape, xp):
303
+ # Update `active` indices of the arrays in result object `res` with the
304
+ # contents of the scalars and arrays in `update_dict`. When provided,
305
+ # `mask` is a boolean array applied both to the arrays in `update_dict`
306
+ # that are to be used and to the arrays in `res` that are to be updated.
307
+ update_dict = {key1: work[key2] for key1, key2 in res_work_pairs}
308
+ update_dict['success'] = work.status == 0
309
+
310
+ if mask is not None:
311
+ if preserve_shape:
312
+ active_mask = xp.zeros_like(mask)
313
+ active_mask = xpx.at(active_mask)[active].set(True)
314
+ active_mask = active_mask & mask
315
+ for key, val in update_dict.items():
316
+ val = val[active_mask] if getattr(val, 'ndim', 0) > 0 else val
317
+ res[key] = xpx.at(res[key])[active_mask].set(val)
318
+ else:
319
+ active_mask = active[mask]
320
+ for key, val in update_dict.items():
321
+ val = val[mask] if getattr(val, 'ndim', 0) > 0 else val
322
+ res[key] = xpx.at(res[key])[active_mask].set(val)
323
+ else:
324
+ for key, val in update_dict.items():
325
+ if preserve_shape and getattr(val, 'ndim', 0) > 0:
326
+ val = val[active]
327
+ res[key] = xpx.at(res[key])[active].set(val)
328
+
329
+
330
+ def _prepare_result(work, res, res_work_pairs, active, shape, customize_result,
331
+ preserve_shape, xp):
332
+ # Prepare the result object `res` by creating a copy, copying the latest
333
+ # data from work, running the provided result customization function,
334
+ # and reshaping the data to the original shapes.
335
+ res = res.copy()
336
+ _update_active(work, res, res_work_pairs, active, None, preserve_shape, xp)
337
+
338
+ shape = customize_result(res, shape)
339
+
340
+ for key, val in res.items():
341
+ # this looks like it won't work for xp != np if val is not numeric
342
+ temp = xp.reshape(val, shape)
343
+ res[key] = temp[()] if temp.ndim == 0 else temp
344
+
345
+ res['_order_keys'] = ['success'] + [i for i, j in res_work_pairs]
346
+ return _RichResult(**res)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_fpumode.cpython-312-x86_64-linux-gnu.so ADDED
Binary file (16.2 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_gcutils.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module for testing automatic garbage collection of objects
3
+
4
+ .. autosummary::
5
+ :toctree: generated/
6
+
7
+ set_gc_state - enable or disable garbage collection
8
+ gc_state - context manager for given state of garbage collector
9
+ assert_deallocated - context manager to check for circular references on object
10
+
11
+ """
12
+ import weakref
13
+ import gc
14
+
15
+ from contextlib import contextmanager
16
+ from platform import python_implementation
17
+
18
+ __all__ = ['set_gc_state', 'gc_state', 'assert_deallocated']
19
+
20
+
21
+ IS_PYPY = python_implementation() == 'PyPy'
22
+
23
+
24
+ class ReferenceError(AssertionError):
25
+ pass
26
+
27
+
28
+ def set_gc_state(state):
29
+ """ Set status of garbage collector """
30
+ if gc.isenabled() == state:
31
+ return
32
+ if state:
33
+ gc.enable()
34
+ else:
35
+ gc.disable()
36
+
37
+
38
+ @contextmanager
39
+ def gc_state(state):
40
+ """ Context manager to set state of garbage collector to `state`
41
+
42
+ Parameters
43
+ ----------
44
+ state : bool
45
+ True for gc enabled, False for disabled
46
+
47
+ Examples
48
+ --------
49
+ >>> with gc_state(False):
50
+ ... assert not gc.isenabled()
51
+ >>> with gc_state(True):
52
+ ... assert gc.isenabled()
53
+ """
54
+ orig_state = gc.isenabled()
55
+ set_gc_state(state)
56
+ yield
57
+ set_gc_state(orig_state)
58
+
59
+
60
+ @contextmanager
61
+ def assert_deallocated(func, *args, **kwargs):
62
+ """Context manager to check that object is deallocated
63
+
64
+ This is useful for checking that an object can be freed directly by
65
+ reference counting, without requiring gc to break reference cycles.
66
+ GC is disabled inside the context manager.
67
+
68
+ This check is not available on PyPy.
69
+
70
+ Parameters
71
+ ----------
72
+ func : callable
73
+ Callable to create object to check
74
+ \\*args : sequence
75
+ positional arguments to `func` in order to create object to check
76
+ \\*\\*kwargs : dict
77
+ keyword arguments to `func` in order to create object to check
78
+
79
+ Examples
80
+ --------
81
+ >>> class C: pass
82
+ >>> with assert_deallocated(C) as c:
83
+ ... # do something
84
+ ... del c
85
+
86
+ >>> class C:
87
+ ... def __init__(self):
88
+ ... self._circular = self # Make circular reference
89
+ >>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL
90
+ ... # do something
91
+ ... del c
92
+ Traceback (most recent call last):
93
+ ...
94
+ ReferenceError: Remaining reference(s) to object
95
+ """
96
+ if IS_PYPY:
97
+ raise RuntimeError("assert_deallocated is unavailable on PyPy")
98
+
99
+ with gc_state(False):
100
+ obj = func(*args, **kwargs)
101
+ ref = weakref.ref(obj)
102
+ yield obj
103
+ del obj
104
+ if ref() is not None:
105
+ raise ReferenceError("Remaining reference(s) to object")
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_pep440.py ADDED
@@ -0,0 +1,487 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utility to compare pep440 compatible version strings.
2
+
3
+ The LooseVersion and StrictVersion classes that distutils provides don't
4
+ work; they don't recognize anything like alpha/beta/rc/dev versions.
5
+ """
6
+
7
+ # Copyright (c) Donald Stufft and individual contributors.
8
+ # All rights reserved.
9
+
10
+ # Redistribution and use in source and binary forms, with or without
11
+ # modification, are permitted provided that the following conditions are met:
12
+
13
+ # 1. Redistributions of source code must retain the above copyright notice,
14
+ # this list of conditions and the following disclaimer.
15
+
16
+ # 2. Redistributions in binary form must reproduce the above copyright
17
+ # notice, this list of conditions and the following disclaimer in the
18
+ # documentation and/or other materials provided with the distribution.
19
+
20
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24
+ # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25
+ # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26
+ # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28
+ # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29
+ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30
+ # POSSIBILITY OF SUCH DAMAGE.
31
+
32
+ import collections
33
+ import itertools
34
+ import re
35
+
36
+
37
+ __all__ = [
38
+ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
39
+ ]
40
+
41
+
42
+ # BEGIN packaging/_structures.py
43
+
44
+
45
+ class Infinity:
46
+ def __repr__(self):
47
+ return "Infinity"
48
+
49
+ def __hash__(self):
50
+ return hash(repr(self))
51
+
52
+ def __lt__(self, other):
53
+ return False
54
+
55
+ def __le__(self, other):
56
+ return False
57
+
58
+ def __eq__(self, other):
59
+ return isinstance(other, self.__class__)
60
+
61
+ def __ne__(self, other):
62
+ return not isinstance(other, self.__class__)
63
+
64
+ def __gt__(self, other):
65
+ return True
66
+
67
+ def __ge__(self, other):
68
+ return True
69
+
70
+ def __neg__(self):
71
+ return NegativeInfinity
72
+
73
+
74
+ Infinity = Infinity()
75
+
76
+
77
+ class NegativeInfinity:
78
+ def __repr__(self):
79
+ return "-Infinity"
80
+
81
+ def __hash__(self):
82
+ return hash(repr(self))
83
+
84
+ def __lt__(self, other):
85
+ return True
86
+
87
+ def __le__(self, other):
88
+ return True
89
+
90
+ def __eq__(self, other):
91
+ return isinstance(other, self.__class__)
92
+
93
+ def __ne__(self, other):
94
+ return not isinstance(other, self.__class__)
95
+
96
+ def __gt__(self, other):
97
+ return False
98
+
99
+ def __ge__(self, other):
100
+ return False
101
+
102
+ def __neg__(self):
103
+ return Infinity
104
+
105
+
106
+ # BEGIN packaging/version.py
107
+
108
+
109
+ NegativeInfinity = NegativeInfinity()
110
+
111
+ _Version = collections.namedtuple(
112
+ "_Version",
113
+ ["epoch", "release", "dev", "pre", "post", "local"],
114
+ )
115
+
116
+
117
+ def parse(version):
118
+ """
119
+ Parse the given version string and return either a :class:`Version` object
120
+ or a :class:`LegacyVersion` object depending on if the given version is
121
+ a valid PEP 440 version or a legacy version.
122
+ """
123
+ try:
124
+ return Version(version)
125
+ except InvalidVersion:
126
+ return LegacyVersion(version)
127
+
128
+
129
+ class InvalidVersion(ValueError):
130
+ """
131
+ An invalid version was found, users should refer to PEP 440.
132
+ """
133
+
134
+
135
+ class _BaseVersion:
136
+
137
+ def __hash__(self):
138
+ return hash(self._key)
139
+
140
+ def __lt__(self, other):
141
+ return self._compare(other, lambda s, o: s < o)
142
+
143
+ def __le__(self, other):
144
+ return self._compare(other, lambda s, o: s <= o)
145
+
146
+ def __eq__(self, other):
147
+ return self._compare(other, lambda s, o: s == o)
148
+
149
+ def __ge__(self, other):
150
+ return self._compare(other, lambda s, o: s >= o)
151
+
152
+ def __gt__(self, other):
153
+ return self._compare(other, lambda s, o: s > o)
154
+
155
+ def __ne__(self, other):
156
+ return self._compare(other, lambda s, o: s != o)
157
+
158
+ def _compare(self, other, method):
159
+ if not isinstance(other, _BaseVersion):
160
+ return NotImplemented
161
+
162
+ return method(self._key, other._key)
163
+
164
+
165
+ class LegacyVersion(_BaseVersion):
166
+
167
+ def __init__(self, version):
168
+ self._version = str(version)
169
+ self._key = _legacy_cmpkey(self._version)
170
+
171
+ def __str__(self):
172
+ return self._version
173
+
174
+ def __repr__(self):
175
+ return f"<LegacyVersion({repr(str(self))})>"
176
+
177
+ @property
178
+ def public(self):
179
+ return self._version
180
+
181
+ @property
182
+ def base_version(self):
183
+ return self._version
184
+
185
+ @property
186
+ def local(self):
187
+ return None
188
+
189
+ @property
190
+ def is_prerelease(self):
191
+ return False
192
+
193
+ @property
194
+ def is_postrelease(self):
195
+ return False
196
+
197
+
198
+ _legacy_version_component_re = re.compile(
199
+ r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
200
+ )
201
+
202
+ _legacy_version_replacement_map = {
203
+ "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
204
+ }
205
+
206
+
207
+ def _parse_version_parts(s):
208
+ for part in _legacy_version_component_re.split(s):
209
+ part = _legacy_version_replacement_map.get(part, part)
210
+
211
+ if not part or part == ".":
212
+ continue
213
+
214
+ if part[:1] in "0123456789":
215
+ # pad for numeric comparison
216
+ yield part.zfill(8)
217
+ else:
218
+ yield "*" + part
219
+
220
+ # ensure that alpha/beta/candidate are before final
221
+ yield "*final"
222
+
223
+
224
+ def _legacy_cmpkey(version):
225
+ # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
226
+ # greater than or equal to 0. This will effectively put the LegacyVersion,
227
+ # which uses the defacto standard originally implemented by setuptools,
228
+ # as before all PEP 440 versions.
229
+ epoch = -1
230
+
231
+ # This scheme is taken from pkg_resources.parse_version setuptools prior to
232
+ # its adoption of the packaging library.
233
+ parts = []
234
+ for part in _parse_version_parts(version.lower()):
235
+ if part.startswith("*"):
236
+ # remove "-" before a prerelease tag
237
+ if part < "*final":
238
+ while parts and parts[-1] == "*final-":
239
+ parts.pop()
240
+
241
+ # remove trailing zeros from each series of numeric parts
242
+ while parts and parts[-1] == "00000000":
243
+ parts.pop()
244
+
245
+ parts.append(part)
246
+ parts = tuple(parts)
247
+
248
+ return epoch, parts
249
+
250
+
251
+ # Deliberately not anchored to the start and end of the string, to make it
252
+ # easier for 3rd party code to reuse
253
+ VERSION_PATTERN = r"""
254
+ v?
255
+ (?:
256
+ (?:(?P<epoch>[0-9]+)!)? # epoch
257
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
258
+ (?P<pre> # pre-release
259
+ [-_\.]?
260
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
261
+ [-_\.]?
262
+ (?P<pre_n>[0-9]+)?
263
+ )?
264
+ (?P<post> # post release
265
+ (?:-(?P<post_n1>[0-9]+))
266
+ |
267
+ (?:
268
+ [-_\.]?
269
+ (?P<post_l>post|rev|r)
270
+ [-_\.]?
271
+ (?P<post_n2>[0-9]+)?
272
+ )
273
+ )?
274
+ (?P<dev> # dev release
275
+ [-_\.]?
276
+ (?P<dev_l>dev)
277
+ [-_\.]?
278
+ (?P<dev_n>[0-9]+)?
279
+ )?
280
+ )
281
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
282
+ """
283
+
284
+
285
+ class Version(_BaseVersion):
286
+
287
+ _regex = re.compile(
288
+ r"^\s*" + VERSION_PATTERN + r"\s*$",
289
+ re.VERBOSE | re.IGNORECASE,
290
+ )
291
+
292
+ def __init__(self, version):
293
+ # Validate the version and parse it into pieces
294
+ match = self._regex.search(version)
295
+ if not match:
296
+ raise InvalidVersion(f"Invalid version: '{version}'")
297
+
298
+ # Store the parsed out pieces of the version
299
+ self._version = _Version(
300
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
301
+ release=tuple(int(i) for i in match.group("release").split(".")),
302
+ pre=_parse_letter_version(
303
+ match.group("pre_l"),
304
+ match.group("pre_n"),
305
+ ),
306
+ post=_parse_letter_version(
307
+ match.group("post_l"),
308
+ match.group("post_n1") or match.group("post_n2"),
309
+ ),
310
+ dev=_parse_letter_version(
311
+ match.group("dev_l"),
312
+ match.group("dev_n"),
313
+ ),
314
+ local=_parse_local_version(match.group("local")),
315
+ )
316
+
317
+ # Generate a key which will be used for sorting
318
+ self._key = _cmpkey(
319
+ self._version.epoch,
320
+ self._version.release,
321
+ self._version.pre,
322
+ self._version.post,
323
+ self._version.dev,
324
+ self._version.local,
325
+ )
326
+
327
+ def __repr__(self):
328
+ return f"<Version({repr(str(self))})>"
329
+
330
+ def __str__(self):
331
+ parts = []
332
+
333
+ # Epoch
334
+ if self._version.epoch != 0:
335
+ parts.append(f"{self._version.epoch}!")
336
+
337
+ # Release segment
338
+ parts.append(".".join(str(x) for x in self._version.release))
339
+
340
+ # Pre-release
341
+ if self._version.pre is not None:
342
+ parts.append("".join(str(x) for x in self._version.pre))
343
+
344
+ # Post-release
345
+ if self._version.post is not None:
346
+ parts.append(f".post{self._version.post[1]}")
347
+
348
+ # Development release
349
+ if self._version.dev is not None:
350
+ parts.append(f".dev{self._version.dev[1]}")
351
+
352
+ # Local version segment
353
+ if self._version.local is not None:
354
+ parts.append(
355
+ "+{}".format(".".join(str(x) for x in self._version.local))
356
+ )
357
+
358
+ return "".join(parts)
359
+
360
+ @property
361
+ def public(self):
362
+ return str(self).split("+", 1)[0]
363
+
364
+ @property
365
+ def base_version(self):
366
+ parts = []
367
+
368
+ # Epoch
369
+ if self._version.epoch != 0:
370
+ parts.append(f"{self._version.epoch}!")
371
+
372
+ # Release segment
373
+ parts.append(".".join(str(x) for x in self._version.release))
374
+
375
+ return "".join(parts)
376
+
377
+ @property
378
+ def local(self):
379
+ version_string = str(self)
380
+ if "+" in version_string:
381
+ return version_string.split("+", 1)[1]
382
+
383
+ @property
384
+ def is_prerelease(self):
385
+ return bool(self._version.dev or self._version.pre)
386
+
387
+ @property
388
+ def is_postrelease(self):
389
+ return bool(self._version.post)
390
+
391
+
392
+ def _parse_letter_version(letter, number):
393
+ if letter:
394
+ # We assume there is an implicit 0 in a pre-release if there is
395
+ # no numeral associated with it.
396
+ if number is None:
397
+ number = 0
398
+
399
+ # We normalize any letters to their lower-case form
400
+ letter = letter.lower()
401
+
402
+ # We consider some words to be alternate spellings of other words and
403
+ # in those cases we want to normalize the spellings to our preferred
404
+ # spelling.
405
+ if letter == "alpha":
406
+ letter = "a"
407
+ elif letter == "beta":
408
+ letter = "b"
409
+ elif letter in ["c", "pre", "preview"]:
410
+ letter = "rc"
411
+ elif letter in ["rev", "r"]:
412
+ letter = "post"
413
+
414
+ return letter, int(number)
415
+ if not letter and number:
416
+ # We assume that if we are given a number but not given a letter,
417
+ # then this is using the implicit post release syntax (e.g., 1.0-1)
418
+ letter = "post"
419
+
420
+ return letter, int(number)
421
+
422
+
423
+ _local_version_seperators = re.compile(r"[\._-]")
424
+
425
+
426
+ def _parse_local_version(local):
427
+ """
428
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
429
+ """
430
+ if local is not None:
431
+ return tuple(
432
+ part.lower() if not part.isdigit() else int(part)
433
+ for part in _local_version_seperators.split(local)
434
+ )
435
+
436
+
437
+ def _cmpkey(epoch, release, pre, post, dev, local):
438
+ # When we compare a release version, we want to compare it with all of the
439
+ # trailing zeros removed. So we'll use a reverse the list, drop all the now
440
+ # leading zeros until we come to something non-zero, then take the rest,
441
+ # re-reverse it back into the correct order, and make it a tuple and use
442
+ # that for our sorting key.
443
+ release = tuple(
444
+ reversed(list(
445
+ itertools.dropwhile(
446
+ lambda x: x == 0,
447
+ reversed(release),
448
+ )
449
+ ))
450
+ )
451
+
452
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
453
+ # We'll do this by abusing the pre-segment, but we _only_ want to do this
454
+ # if there is no pre- or a post-segment. If we have one of those, then
455
+ # the normal sorting rules will handle this case correctly.
456
+ if pre is None and post is None and dev is not None:
457
+ pre = -Infinity
458
+ # Versions without a pre-release (except as noted above) should sort after
459
+ # those with one.
460
+ elif pre is None:
461
+ pre = Infinity
462
+
463
+ # Versions without a post-segment should sort before those with one.
464
+ if post is None:
465
+ post = -Infinity
466
+
467
+ # Versions without a development segment should sort after those with one.
468
+ if dev is None:
469
+ dev = Infinity
470
+
471
+ if local is None:
472
+ # Versions without a local segment should sort before those with one.
473
+ local = -Infinity
474
+ else:
475
+ # Versions with a local segment need that segment parsed to implement
476
+ # the sorting rules in PEP440.
477
+ # - Alphanumeric segments sort before numeric segments
478
+ # - Alphanumeric segments sort lexicographically
479
+ # - Numeric segments sort numerically
480
+ # - Shorter versions sort before longer versions when the prefixes
481
+ # match exactly
482
+ local = tuple(
483
+ (i, "") if isinstance(i, int) else (-Infinity, i)
484
+ for i in local
485
+ )
486
+
487
+ return epoch, release, pre, post, dev, local
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_public_api.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """PUBLIC_MODULES was once included in scipy._lib.tests.test_public_api.
2
+
3
+ It has been separated into this file so that this list of public modules
4
+ could be used when generating tables showing support for alternative
5
+ array API backends across modules in
6
+ scipy/doc/source/array_api_capabilities.py.
7
+ """
8
+
9
+ # Historically SciPy has not used leading underscores for private submodules
10
+ # much. This has resulted in lots of things that look like public modules
11
+ # (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
12
+ # but were never intended to be public. The PUBLIC_MODULES list contains
13
+ # modules that are either public because they were meant to be, or because they
14
+ # contain public functions/objects that aren't present in any other namespace
15
+ # for whatever reason and therefore should be treated as public.
16
+ PUBLIC_MODULES = ["scipy." + s for s in [
17
+ "cluster",
18
+ "cluster.vq",
19
+ "cluster.hierarchy",
20
+ "constants",
21
+ "datasets",
22
+ "differentiate",
23
+ "fft",
24
+ "fftpack",
25
+ "integrate",
26
+ "interpolate",
27
+ "io",
28
+ "io.arff",
29
+ "io.matlab",
30
+ "io.wavfile",
31
+ "linalg",
32
+ "linalg.blas",
33
+ "linalg.cython_blas",
34
+ "linalg.lapack",
35
+ "linalg.cython_lapack",
36
+ "linalg.interpolative",
37
+ "ndimage",
38
+ "odr",
39
+ "optimize",
40
+ "optimize.elementwise",
41
+ "signal",
42
+ "signal.windows",
43
+ "sparse",
44
+ "sparse.linalg",
45
+ "sparse.csgraph",
46
+ "spatial",
47
+ "spatial.distance",
48
+ "spatial.transform",
49
+ "special",
50
+ "stats",
51
+ "stats.contingency",
52
+ "stats.distributions",
53
+ "stats.mstats",
54
+ "stats.qmc",
55
+ "stats.sampling"
56
+ ]]
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_sparse.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC
2
+
3
+ __all__ = ["SparseABC", "issparse"]
4
+
5
+
6
+ class SparseABC(ABC):
7
+ pass
8
+
9
+
10
+ def issparse(x):
11
+ """Is `x` of a sparse array or sparse matrix type?
12
+
13
+ Parameters
14
+ ----------
15
+ x
16
+ object to check for being a sparse array or sparse matrix
17
+
18
+ Returns
19
+ -------
20
+ bool
21
+ True if `x` is a sparse array or a sparse matrix, False otherwise
22
+
23
+ Notes
24
+ -----
25
+ Use `isinstance(x, sp.sparse.sparray)` to check between an array or matrix.
26
+ Use `a.format` to check the sparse format, e.g. `a.format == 'csr'`.
27
+
28
+ Examples
29
+ --------
30
+ >>> import numpy as np
31
+ >>> from scipy.sparse import csr_array, csr_matrix, issparse
32
+ >>> issparse(csr_matrix([[5]]))
33
+ True
34
+ >>> issparse(csr_array([[5]]))
35
+ True
36
+ >>> issparse(np.array([[5]]))
37
+ False
38
+ >>> issparse(5)
39
+ False
40
+ """
41
+ return isinstance(x, SparseABC)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_test_ccallback.cpython-312-x86_64-linux-gnu.so ADDED
Binary file (23 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_test_deprecation_call.cpython-312-x86_64-linux-gnu.so ADDED
Binary file (49.2 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_test_deprecation_def.cpython-312-x86_64-linux-gnu.so ADDED
Binary file (28.3 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_testutils.py ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Generic test utilities.
3
+
4
+ """
5
+
6
+ import inspect
7
+ import os
8
+ import re
9
+ import shutil
10
+ import subprocess
11
+ import sys
12
+ import sysconfig
13
+ import threading
14
+ from importlib.util import module_from_spec, spec_from_file_location
15
+
16
+ import numpy as np
17
+ import scipy
18
+
19
+ try:
20
+ # Need type: ignore[import-untyped] for mypy >= 1.6
21
+ import cython # type: ignore[import-untyped]
22
+ from Cython.Compiler.Version import ( # type: ignore[import-untyped]
23
+ version as cython_version,
24
+ )
25
+ except ImportError:
26
+ cython = None
27
+ else:
28
+ from scipy._lib import _pep440
29
+ required_version = '3.0.8'
30
+ if _pep440.parse(cython_version) < _pep440.Version(required_version):
31
+ # too old or wrong cython, skip Cython API tests
32
+ cython = None
33
+
34
+
35
+ __all__ = ['PytestTester', 'check_free_memory', '_TestPythranFunc', 'IS_MUSL']
36
+
37
+
38
+ IS_MUSL = False
39
+ # alternate way is
40
+ # from packaging.tags import sys_tags
41
+ # _tags = list(sys_tags())
42
+ # if 'musllinux' in _tags[0].platform:
43
+ _v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
44
+ if 'musl' in _v:
45
+ IS_MUSL = True
46
+
47
+
48
+ IS_EDITABLE = 'editable' in scipy.__path__[0]
49
+
50
+
51
+ class FPUModeChangeWarning(RuntimeWarning):
52
+ """Warning about FPU mode change"""
53
+ pass
54
+
55
+
56
+ class PytestTester:
57
+ """
58
+ Run tests for this namespace
59
+
60
+ ``scipy.test()`` runs tests for all of SciPy, with the default settings.
61
+ When used from a submodule (e.g., ``scipy.cluster.test()``, only the tests
62
+ for that namespace are run.
63
+
64
+ Parameters
65
+ ----------
66
+ label : {'fast', 'full'}, optional
67
+ Whether to run only the fast tests, or also those marked as slow.
68
+ Default is 'fast'.
69
+ verbose : int, optional
70
+ Test output verbosity. Default is 1.
71
+ extra_argv : list, optional
72
+ Arguments to pass through to Pytest.
73
+ doctests : bool, optional
74
+ Whether to run doctests or not. Default is False.
75
+ coverage : bool, optional
76
+ Whether to run tests with code coverage measurements enabled.
77
+ Default is False.
78
+ tests : list of str, optional
79
+ List of module names to run tests for. By default, uses the module
80
+ from which the ``test`` function is called.
81
+ parallel : int, optional
82
+ Run tests in parallel with pytest-xdist, if number given is larger than
83
+ 1. Default is 1.
84
+
85
+ """
86
+ def __init__(self, module_name):
87
+ self.module_name = module_name
88
+
89
+ def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False,
90
+ coverage=False, tests=None, parallel=None):
91
+ import pytest
92
+
93
+ module = sys.modules[self.module_name]
94
+ module_path = os.path.abspath(module.__path__[0])
95
+
96
+ pytest_args = ['--showlocals', '--tb=short']
97
+
98
+ if extra_argv is None:
99
+ extra_argv = []
100
+ pytest_args += extra_argv
101
+ if any(arg == "-m" or arg == "--markers" for arg in extra_argv):
102
+ # Likely conflict with default --mode=fast
103
+ raise ValueError("Must specify -m before --")
104
+
105
+ if verbose and int(verbose) > 1:
106
+ pytest_args += ["-" + "v"*(int(verbose)-1)]
107
+
108
+ if coverage:
109
+ pytest_args += ["--cov=" + module_path]
110
+
111
+ if label == "fast":
112
+ pytest_args += ["-m", "not slow"]
113
+ elif label != "full":
114
+ pytest_args += ["-m", label]
115
+
116
+ if tests is None:
117
+ tests = [self.module_name]
118
+
119
+ if parallel is not None and parallel > 1:
120
+ if _pytest_has_xdist():
121
+ pytest_args += ['-n', str(parallel)]
122
+ else:
123
+ import warnings
124
+ warnings.warn('Could not run tests in parallel because '
125
+ 'pytest-xdist plugin is not available.',
126
+ stacklevel=2)
127
+
128
+ pytest_args += ['--pyargs'] + list(tests)
129
+
130
+ try:
131
+ code = pytest.main(pytest_args)
132
+ except SystemExit as exc:
133
+ code = exc.code
134
+
135
+ return (code == 0)
136
+
137
+
138
+ class _TestPythranFunc:
139
+ '''
140
+ These are situations that can be tested in our pythran tests:
141
+ - A function with multiple array arguments and then
142
+ other positional and keyword arguments.
143
+ - A function with array-like keywords (e.g. `def somefunc(x0, x1=None)`.
144
+ Note: list/tuple input is not yet tested!
145
+
146
+ `self.arguments`: A dictionary which key is the index of the argument,
147
+ value is tuple(array value, all supported dtypes)
148
+ `self.partialfunc`: A function used to freeze some non-array argument
149
+ that of no interests in the original function
150
+ '''
151
+ ALL_INTEGER = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp]
152
+ ALL_FLOAT = [np.float32, np.float64]
153
+ ALL_COMPLEX = [np.complex64, np.complex128]
154
+
155
+ def setup_method(self):
156
+ self.arguments = {}
157
+ self.partialfunc = None
158
+ self.expected = None
159
+
160
+ def get_optional_args(self, func):
161
+ # get optional arguments with its default value,
162
+ # used for testing keywords
163
+ signature = inspect.signature(func)
164
+ optional_args = {}
165
+ for k, v in signature.parameters.items():
166
+ if v.default is not inspect.Parameter.empty:
167
+ optional_args[k] = v.default
168
+ return optional_args
169
+
170
+ def get_max_dtype_list_length(self):
171
+ # get the max supported dtypes list length in all arguments
172
+ max_len = 0
173
+ for arg_idx in self.arguments:
174
+ cur_len = len(self.arguments[arg_idx][1])
175
+ if cur_len > max_len:
176
+ max_len = cur_len
177
+ return max_len
178
+
179
+ def get_dtype(self, dtype_list, dtype_idx):
180
+ # get the dtype from dtype_list via index
181
+ # if the index is out of range, then return the last dtype
182
+ if dtype_idx > len(dtype_list)-1:
183
+ return dtype_list[-1]
184
+ else:
185
+ return dtype_list[dtype_idx]
186
+
187
+ def test_all_dtypes(self):
188
+ for type_idx in range(self.get_max_dtype_list_length()):
189
+ args_array = []
190
+ for arg_idx in self.arguments:
191
+ new_dtype = self.get_dtype(self.arguments[arg_idx][1],
192
+ type_idx)
193
+ args_array.append(self.arguments[arg_idx][0].astype(new_dtype))
194
+ self.pythranfunc(*args_array)
195
+
196
+ def test_views(self):
197
+ args_array = []
198
+ for arg_idx in self.arguments:
199
+ args_array.append(self.arguments[arg_idx][0][::-1][::-1])
200
+ self.pythranfunc(*args_array)
201
+
202
+ def test_strided(self):
203
+ args_array = []
204
+ for arg_idx in self.arguments:
205
+ args_array.append(np.repeat(self.arguments[arg_idx][0],
206
+ 2, axis=0)[::2])
207
+ self.pythranfunc(*args_array)
208
+
209
+
210
+ def _pytest_has_xdist():
211
+ """
212
+ Check if the pytest-xdist plugin is installed, providing parallel tests
213
+ """
214
+ # Check xdist exists without importing, otherwise pytests emits warnings
215
+ from importlib.util import find_spec
216
+ return find_spec('xdist') is not None
217
+
218
+
219
def check_free_memory(free_mb):
    """
    Check *free_mb* of memory is available, otherwise do pytest.skip
    """
    import pytest

    env_value = os.environ.get('SCIPY_AVAILABLE_MEM')
    if env_value is not None:
        # The environment override takes precedence over autodetection.
        mem_free = _parse_size(env_value)
        msg = (f'{free_mb} MB memory required, but environment '
               f'SCIPY_AVAILABLE_MEM={env_value}')
    else:
        mem_free = _get_mem_available()
        if mem_free is None:
            pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
                        "variable to free memory in MB to run the test.")
        msg = f'{free_mb} MB memory required, but {mem_free/1e6} MB available'

    if mem_free < free_mb * 1e6:
        pytest.skip(msg)
238
+
239
+
240
+ def _parse_size(size_str):
241
+ suffixes = {'': 1e6,
242
+ 'b': 1.0,
243
+ 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
244
+ 'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12,
245
+ 'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4}
246
+ m = re.match(r'^\s*(\d+)\s*({})\s*$'.format('|'.join(suffixes.keys())),
247
+ size_str,
248
+ re.I)
249
+ if not m or m.group(2) not in suffixes:
250
+ raise ValueError("Invalid size string")
251
+
252
+ return float(m.group(1)) * suffixes[m.group(2)]
253
+
254
+
255
+ def _get_mem_available():
256
+ """
257
+ Get information about memory available, not counting swap.
258
+ """
259
+ try:
260
+ import psutil
261
+ return psutil.virtual_memory().available
262
+ except (ImportError, AttributeError):
263
+ pass
264
+
265
+ if sys.platform.startswith('linux'):
266
+ info = {}
267
+ with open('/proc/meminfo') as f:
268
+ for line in f:
269
+ p = line.split()
270
+ info[p[0].strip(':').lower()] = float(p[1]) * 1e3
271
+
272
+ if 'memavailable' in info:
273
+ # Linux >= 3.14
274
+ return info['memavailable']
275
+ else:
276
+ return info['memfree'] + info['cached']
277
+
278
+ return None
279
+
280
def _test_cython_extension(tmp_path, srcdir):
    """
    Helper function to test building and importing Cython modules that
    make use of the Cython APIs for BLAS, LAPACK, optimize, and special.

    Copies ``srcdir`` into ``tmp_path``, builds the bundled
    ``tests/_cython_examples`` project with meson, then imports the built
    ``extending`` and ``extending_cpp`` modules directly from the build
    directory and returns them as a 2-tuple.  Skips the calling test when
    meson is not on PATH.
    """
    import pytest
    try:
        subprocess.check_call(["meson", "--version"])
    except FileNotFoundError:
        pytest.skip("No usable 'meson' found")

    # Make safe for being called by multiple threads within one test:
    # each thread builds in its own subdirectory.
    tmp_path = tmp_path / str(threading.get_ident())

    # build the examples in a temporary directory
    mod_name = os.path.split(srcdir)[1]
    shutil.copytree(srcdir, tmp_path / mod_name)
    build_dir = tmp_path / mod_name / 'tests' / '_cython_examples'
    target_dir = build_dir / 'build'
    os.makedirs(target_dir, exist_ok=True)

    # Ensure we use the correct Python interpreter even when `meson` is
    # installed in a different Python environment (see numpy#24956)
    native_file = str(build_dir / 'interpreter-native-file.ini')
    with open(native_file, 'w') as f:
        f.write("[binaries]\n")
        f.write(f"python = '{sys.executable}'")

    if sys.platform == "win32":
        # --vsenv activates the MSVC environment for the build.
        subprocess.check_call(["meson", "setup",
                               "--buildtype=release",
                               "--native-file", native_file,
                               "--vsenv", str(build_dir)],
                              cwd=target_dir,
                              )
    else:
        subprocess.check_call(["meson", "setup",
                               "--native-file", native_file, str(build_dir)],
                              cwd=target_dir
                              )
    subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir)

    # import without adding the directory to sys.path
    # (platform-specific extension suffix, e.g. '.cpython-312-....so')
    suffix = sysconfig.get_config_var('EXT_SUFFIX')

    def load(modname):
        # Load an extension module straight from its built .so/.pyd file.
        so = (target_dir / modname).with_suffix(suffix)
        spec = spec_from_file_location(modname, so)
        mod = module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod

    # test that the module can be imported
    return load("extending"), load("extending_cpp")
334
+
335
+
336
+ def _run_concurrent_barrier(n_workers, fn, *args, **kwargs):
337
+ """
338
+ Run a given function concurrently across a given number of threads.
339
+
340
+ This is equivalent to using a ThreadPoolExecutor, but using the threading
341
+ primitives instead. This function ensures that the closure passed by
342
+ parameter gets called concurrently by setting up a barrier before it gets
343
+ called before any of the threads.
344
+
345
+ Arguments
346
+ ---------
347
+ n_workers: int
348
+ Number of concurrent threads to spawn.
349
+ fn: callable
350
+ Function closure to execute concurrently. Its first argument will
351
+ be the thread id.
352
+ *args: tuple
353
+ Variable number of positional arguments to pass to the function.
354
+ **kwargs: dict
355
+ Keyword arguments to pass to the function.
356
+ """
357
+ barrier = threading.Barrier(n_workers)
358
+
359
+ def closure(i, *args, **kwargs):
360
+ barrier.wait()
361
+ fn(i, *args, **kwargs)
362
+
363
+ workers = []
364
+ for i in range(0, n_workers):
365
+ workers.append(threading.Thread(
366
+ target=closure,
367
+ args=(i,) + args, kwargs=kwargs))
368
+
369
+ for worker in workers:
370
+ worker.start()
371
+
372
+ for worker in workers:
373
+ worker.join()
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_tmpdirs.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ''' Contexts for *with* statement providing temporary directories
2
+ '''
3
+ import os
4
+ from contextlib import contextmanager
5
+ from shutil import rmtree
6
+ from tempfile import mkdtemp
7
+
8
+
9
@contextmanager
def tempdir():
    """Create and return a temporary directory. This has the same
    behavior as mkdtemp but can be used as a context manager.

    Upon exiting the context, the directory and everything contained
    in it are removed — even when the body of the ``with`` block raises.

    Examples
    --------
    >>> import os
    >>> with tempdir() as tmpdir:
    ...     fname = os.path.join(tmpdir, 'example_file.txt')
    ...     with open(fname, 'wt') as fobj:
    ...         _ = fobj.write('a string\\n')
    >>> os.path.exists(tmpdir)
    False
    """
    d = mkdtemp()
    try:
        yield d
    finally:
        # try/finally guarantees cleanup; previously an exception in the
        # body skipped rmtree and leaked the directory.
        rmtree(d)
30
+
31
+
32
@contextmanager
def in_tempdir():
    ''' Create, return, and change directory to a temporary directory

    The original working directory is restored and the temporary directory
    removed even when the body of the ``with`` block raises.

    Examples
    --------
    >>> import os
    >>> my_cwd = os.getcwd()
    >>> with in_tempdir() as tmpdir:
    ...     _ = open('test.txt', 'wt').write('some text')
    ...     assert os.path.isfile('test.txt')
    ...     assert os.path.isfile(os.path.join(tmpdir, 'test.txt'))
    >>> os.path.exists(tmpdir)
    False
    >>> os.getcwd() == my_cwd
    True
    '''
    pwd = os.getcwd()
    d = mkdtemp()
    os.chdir(d)
    try:
        yield d
    finally:
        # Restore the caller's cwd *before* deleting the temporary
        # directory (some platforms cannot remove the current directory).
        # Previously an exception in the body left the process chdir'd
        # into a deleted-on-next-run temp dir and leaked it.
        os.chdir(pwd)
        rmtree(d)
55
+
56
+
57
@contextmanager
def in_dir(dir=None):
    """ Change directory to given directory for duration of ``with`` block

    Useful when you want to use `in_tempdir` for the final test, but
    you are still debugging. For example, you may want to do this in the end:

    >>> with in_tempdir() as tmpdir:
    ...     # do something complicated which might break
    ...     pass

    But, indeed, the complicated thing does break, and meanwhile, the
    ``in_tempdir`` context manager wiped out the directory with the
    temporary files that you wanted for debugging. So, while debugging, you
    replace with something like:

    >>> with in_dir() as tmpdir: # Use working directory by default
    ...     # do something complicated which might break
    ...     pass

    You can then look at the temporary file outputs to debug what is happening,
    fix, and finally replace ``in_dir`` with ``in_tempdir`` again.

    The original working directory is restored even when the body raises.
    """
    cwd = os.getcwd()
    if dir is None:
        # No target directory: stay where we are, nothing to restore.
        yield cwd
        return
    os.chdir(dir)
    try:
        yield dir
    finally:
        # Always restore the caller's cwd; previously an exception in the
        # body left the process in `dir`.
        os.chdir(cwd)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_util.py ADDED
@@ -0,0 +1,1251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from contextlib import contextmanager
3
+ import functools
4
+ import operator
5
+ import warnings
6
+ import numbers
7
+ from collections import namedtuple
8
+ import inspect
9
+ import math
10
+ import os
11
+ import sys
12
+ import textwrap
13
+ from types import ModuleType
14
+ from typing import Literal, TypeAlias, TypeVar
15
+
16
+ import numpy as np
17
+ from scipy._lib._array_api import (Array, array_namespace, is_lazy_array, is_numpy,
18
+ is_marray, xp_size, xp_result_device, xp_result_type)
19
+ from scipy._lib._docscrape import FunctionDoc, Parameter
20
+ from scipy._lib._sparse import issparse
21
+
22
+ from numpy.exceptions import AxisError
23
+
24
+
25
# Aliases for NumPy's C-`long`-sized integer types.  NumPy 2.0 introduced
# `np.long`/`np.ulong`; on versions where they do not exist we fall back to
# `np.int_`/`np.uint`.
np_long: type
np_ulong: type

if np.lib.NumpyVersion(np.__version__) >= "2.0.0.dev0":
    try:
        with warnings.catch_warnings():
            # Some 2.0 dev builds emitted a FutureWarning on attribute
            # access before `np.long` was (re)defined; silence it.
            warnings.filterwarnings(
                "ignore",
                r".*In the future `np\.long` will be defined as.*",
                FutureWarning,
            )
            np_long = np.long  # type: ignore[attr-defined]
            np_ulong = np.ulong  # type: ignore[attr-defined]
    except AttributeError:
        # 2.0 dev versions that predate np.long/np.ulong.
        np_long = np.int_
        np_ulong = np.uint
else:
    np_long = np.int_
    np_ulong = np.uint
44
+
45
# Scalar-ish type unions used throughout scipy for input validation.
IntNumber = int | np.integer
DecimalNumber = float | np.floating | np.integer

# Value to pass as ``copy=`` to np.array/__array__ meaning "copy only if
# needed": NumPy >= 2.0 spells this ``None``; older NumPy uses ``False``.
copy_if_needed: bool | None

if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
    copy_if_needed = None
elif np.lib.NumpyVersion(np.__version__) < "1.28.0":
    copy_if_needed = False
else:
    # 2.0.0 dev versions, handle cases where copy may or may not exist
    try:
        np.array([1]).__array__(copy=None)  # type: ignore[call-overload]
        copy_if_needed = None
    except TypeError:
        copy_if_needed = False
61
+
62
+
63
# Wrapped function for inspect.signature for compatibility with Python 3.14+
# See gh-23913
#
# PEP 649/749 allows for undefined annotations at runtime, and added the
# `annotation_format` parameter to handle these cases.
# `annotationlib.Format.FORWARDREF` is the closest to previous behavior,
# returning ForwardRef objects for new undefined-annotation cases.
#
# Consider dropping this wrapper when support for Python 3.13 is dropped.
if sys.version_info >= (3, 14):
    import annotationlib
    def wrapped_inspect_signature(callable):
        """Get a signature object for the passed callable."""
        return inspect.signature(callable,
                                 annotation_format=annotationlib.Format.FORWARDREF)
else:
    # Pre-3.14: plain inspect.signature already has the desired behavior.
    wrapped_inspect_signature = inspect.signature
80
+
81
+
82
# Union of NumPy's legacy and modern RNG types; `SeedType` is anything
# accepted as a seed specification (int, RNG instance, or None).
_RNG: TypeAlias = np.random.Generator | np.random.RandomState
SeedType: TypeAlias = IntNumber | _RNG | None

# TypeVar so helpers can declare "returns the same RNG subtype it was given".
GeneratorType = TypeVar("GeneratorType", bound=_RNG)
86
+
87
+
88
+ def _lazyselect(condlist, choicelist, arrays, default=0):
89
+ """
90
+ Mimic `np.select(condlist, choicelist)`.
91
+
92
+ Notice, it assumes that all `arrays` are of the same shape or can be
93
+ broadcasted together.
94
+
95
+ All functions in `choicelist` must accept array arguments in the order
96
+ given in `arrays` and must return an array of the same shape as broadcasted
97
+ `arrays`.
98
+
99
+ Examples
100
+ --------
101
+ >>> import numpy as np
102
+ >>> x = np.arange(6)
103
+ >>> np.select([x <3, x > 3], [x**2, x**3], default=0)
104
+ array([ 0, 1, 4, 0, 64, 125])
105
+
106
+ >>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
107
+ array([ 0., 1., 4., 0., 64., 125.])
108
+
109
+ >>> a = -np.ones_like(x)
110
+ >>> _lazyselect([x < 3, x > 3],
111
+ ... [lambda x, a: x**2, lambda x, a: a * x**3],
112
+ ... (x, a), default=np.nan)
113
+ array([ 0., 1., 4., nan, -64., -125.])
114
+
115
+ """
116
+ arrays = np.broadcast_arrays(*arrays)
117
+ tcode = np.mintypecode([a.dtype.char for a in arrays])
118
+ out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)
119
+ for func, cond in zip(choicelist, condlist):
120
+ if np.all(cond is False):
121
+ continue
122
+ cond, _ = np.broadcast_arrays(cond, arrays[0])
123
+ temp = tuple(np.extract(cond, arr) for arr in arrays)
124
+ np.place(out, cond, func(*temp))
125
+ return out
126
+
127
+
128
def _aligned_zeros(shape, dtype=float, order="C", align=None):
    """Allocate a new ndarray with aligned memory.

    Primary use case for this currently is working around a f2py issue
    in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does
    not necessarily create arrays aligned up to it.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the zero-filled array to allocate.
    dtype : data-type, optional
        Element type (default ``float``).
    order : {'C', 'F'}, optional
        Memory layout of the result.
    align : int, optional
        Byte alignment for the first element; defaults to
        ``dtype.alignment``.
    """
    dtype = np.dtype(dtype)
    if align is None:
        align = dtype.alignment
    if not hasattr(shape, '__len__'):
        shape = (shape,)
    size = functools.reduce(operator.mul, shape) * dtype.itemsize
    # Over-allocate so the data start can be shifted to an aligned address
    # inside the buffer.
    buf = np.empty(size + align + 1, np.uint8)
    offset = buf.__array_interface__['data'][0] % align
    if offset != 0:
        offset = align - offset
    # Note: slices producing 0-size arrays do not necessarily change
    # data pointer --- so we use and allocate size+1
    buf = buf[offset:offset+size+1][:-1]
    data = np.ndarray(shape, dtype, buf, order=order)
    data.fill(0)
    return data
152
+
153
+
154
+ def _prune_array(array):
155
+ """Return an array equivalent to the input array. If the input
156
+ array is a view of a much larger array, copy its contents to a
157
+ newly allocated array. Otherwise, return the input unchanged.
158
+ """
159
+ if array.base is not None and array.size < array.base.size // 2:
160
+ return array.copy()
161
+ return array
162
+
163
+
164
def float_factorial(n: int) -> float:
    """Compute the factorial and return as a float

    Returns infinity when result is too large for a double
    """
    # 171! overflows an IEEE double (max ~1.8e308); 170! ~ 7.3e306 fits.
    if n < 171:
        return float(math.factorial(n))
    return np.inf
170
+
171
+
172
# Shared docstring fragment for the `rng` parameter; spliced into decorated
# functions' docs by `_transition_to_rng`, with `{old_name}` substituted by
# the legacy keyword name (e.g. "seed" or "random_state").
_rng_desc = (
    r"""If `rng` is passed by keyword, types other than `numpy.random.Generator` are
    passed to `numpy.random.default_rng` to instantiate a ``Generator``.
    If `rng` is already a ``Generator`` instance, then the provided instance is
    used. Specify `rng` for repeatable function behavior.

    If this argument is passed by position or `{old_name}` is passed by keyword,
    legacy behavior for the argument `{old_name}` applies:

    - If `{old_name}` is None (or `numpy.random`), the `numpy.random.RandomState`
      singleton is used.
    - If `{old_name}` is an int, a new ``RandomState`` instance is used,
      seeded with `{old_name}`.
    - If `{old_name}` is already a ``Generator`` or ``RandomState`` instance then
      that instance is used.

    .. versionchanged:: 1.15.0
        As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
        transition from use of `numpy.random.RandomState` to
        `numpy.random.Generator`, this keyword was changed from `{old_name}` to `rng`.
        For an interim period, both keywords will continue to work, although only one
        may be specified at a time. After the interim period, function calls using the
        `{old_name}` keyword will emit warnings. The behavior of both `{old_name}` and
        `rng` are outlined above, but only the `rng` keyword should be used in new code.
    """
)
198
+
199
+
200
# SPEC 7
def _transition_to_rng(old_name, *, position_num=None, end_version=None,
                       replace_doc=True):
    """Example decorator to transition from old PRNG usage to new `rng` behavior

    Suppose the decorator is applied to a function that used to accept parameter
    `old_name='random_state'` either by keyword or as a positional argument at
    `position_num=1`. At the time of application, the name of the argument in the
    function signature is manually changed to the new name, `rng`. If positional
    use was allowed before, this is not changed.*

    - If the function is called with both `random_state` and `rng`, the decorator
      raises an error.
    - If `random_state` is provided as a keyword argument, the decorator passes
      `random_state` to the function's `rng` argument as a keyword. If `end_version`
      is specified, the decorator will emit a `DeprecationWarning` about the
      deprecation of keyword `random_state`.
    - If `random_state` is provided as a positional argument, the decorator passes
      `random_state` to the function's `rng` argument by position. If `end_version`
      is specified, the decorator will emit a `FutureWarning` about the changing
      interpretation of the argument.
    - If `rng` is provided as a keyword argument, the decorator validates `rng` using
      `numpy.random.default_rng` before passing it to the function.
    - If `end_version` is specified and neither `random_state` nor `rng` is provided
      by the user, the decorator checks whether `np.random.seed` has been used to set
      the global seed. If so, it emits a `FutureWarning`, noting that usage of
      `numpy.random.seed` will eventually have no effect. Either way, the decorator
      calls the function without explicitly passing the `rng` argument.

    If `end_version` is specified, a user must pass `rng` as a keyword to avoid
    warnings.

    After the deprecation period, the decorator can be removed, and the function
    can simply validate the `rng` argument by calling `np.random.default_rng(rng)`.

    * A `FutureWarning` is emitted when the PRNG argument is used by
      position. It indicates that the "Hinsen principle" (same
      code yielding different results in two versions of the software)
      will be violated, unless positional use is deprecated. Specifically:

      - If `None` is passed by position and `np.random.seed` has been used,
        the function will change from being seeded to being unseeded.
      - If an integer is passed by position, the random stream will change.
      - If `np.random` or an instance of `RandomState` is passed by position,
        an error will be raised.

      We suggest that projects consider deprecating positional use of
      `random_state`/`rng` (i.e., change their function signatures to
      ``def my_func(..., *, rng=None)``); that might not make sense
      for all projects, so this SPEC does not make that
      recommendation, neither does this decorator enforce it.

    Parameters
    ----------
    old_name : str
        The old name of the PRNG argument (e.g. `seed` or `random_state`).
    position_num : int, optional
        The (0-indexed) position of the old PRNG argument (if accepted by position).
        Maintainers are welcome to eliminate this argument and use, for example,
        `inspect`, if preferred.
    end_version : str, optional
        The full version number of the library when the behavior described in
        `DeprecationWarning`s and `FutureWarning`s will take effect. If left
        unspecified, no warnings will be emitted by the decorator.
    replace_doc : bool, default: True
        Whether the decorator should replace the documentation for parameter `rng` with
        `_rng_desc` (defined above), which documents both new `rng` keyword behavior
        and typical legacy `random_state`/`seed` behavior. If True, manually replace
        the first paragraph of the function's old `random_state`/`seed` documentation
        with the desired *final* `rng` documentation; this way, no changes to
        documentation are needed when the decorator is removed. Documentation of `rng`
        after the first blank line is preserved. Use False if the function's old
        `random_state`/`seed` behavior does not match that described by `_rng_desc`.

    """
    NEW_NAME = "rng"

    cmn_msg = (
        "To silence this warning and ensure consistent behavior in SciPy "
        f"{end_version}, control the RNG using argument `{NEW_NAME}`. Arguments passed "
        f"to keyword `{NEW_NAME}` will be validated by `np.random.default_rng`, so the "
        "behavior corresponding with a given value may change compared to use of "
        f"`{old_name}`. For example, "
        "1) `None` will result in unpredictable random numbers, "
        "2) an integer will result in a different stream of random numbers, (with the "
        "same distribution), and "
        "3) `np.random` or `RandomState` instances will result in an error. "
        "See the documentation of `default_rng` for more information."
    )

    def decorator(fun):
        @functools.wraps(fun)
        def wrapper(*args, **kwargs):
            # Determine how PRNG was passed
            as_old_kwarg = old_name in kwargs
            as_new_kwarg = NEW_NAME in kwargs
            as_pos_arg = position_num is not None and len(args) >= position_num + 1
            emit_warning = end_version is not None

            # Can only specify PRNG one of the three ways
            if int(as_old_kwarg) + int(as_new_kwarg) + int(as_pos_arg) > 1:
                message = (
                    f"{fun.__name__}() got multiple values for "
                    f"argument now known as `{NEW_NAME}`. Specify one of "
                    f"`{NEW_NAME}` or `{old_name}`."
                )
                raise TypeError(message)

            # Check whether global random state has been set:
            # the global RandomState's bit generator has no SeedSequence
            # exactly when `np.random.seed` was called.
            global_seed_set = np.random.mtrand._rand._bit_generator._seed_seq is None

            if as_old_kwarg:  # warn about deprecated use of old kwarg
                kwargs[NEW_NAME] = kwargs.pop(old_name)
                if emit_warning:
                    message = (
                        f"Use of keyword argument `{old_name}` is "
                        f"deprecated and replaced by `{NEW_NAME}`. "
                        f"Support for `{old_name}` will be removed "
                        f"in SciPy {end_version}. "
                    ) + cmn_msg
                    warnings.warn(message, DeprecationWarning, stacklevel=2)

            elif as_pos_arg:
                # Warn about changing meaning of positional arg

                # Note that this decorator does not deprecate positional use of the
                # argument; it only warns that the behavior will change in the future.
                # Simultaneously transitioning to keyword-only use is another option.

                arg = args[position_num]
                # If the argument is None and the global seed wasn't set, or if the
                # argument is one of a few new classes, the user will not notice change
                # in behavior.
                ok_classes = (
                    np.random.Generator,
                    np.random.SeedSequence,
                    np.random.BitGenerator,
                )
                if (arg is None and not global_seed_set) or isinstance(arg, ok_classes):
                    pass
                elif emit_warning:
                    message = (
                        f"Positional use of `{NEW_NAME}` (formerly known as "
                        f"`{old_name}`) is still allowed, but the behavior is "
                        "changing: the argument will be normalized using "
                        f"`np.random.default_rng` beginning in SciPy {end_version}, "
                        "and the resulting `Generator` will be used to generate "
                        "random numbers."
                    ) + cmn_msg
                    warnings.warn(message, FutureWarning, stacklevel=2)

            elif as_new_kwarg:  # no warnings; this is the preferred use
                # After the removal of the decorator, normalization with
                # np.random.default_rng will be done inside the decorated function
                kwargs[NEW_NAME] = np.random.default_rng(kwargs[NEW_NAME])

            elif global_seed_set and emit_warning:
                # Emit FutureWarning if `np.random.seed` was used and no PRNG was passed
                message = (
                    "The NumPy global RNG was seeded by calling "
                    f"`np.random.seed`. Beginning in {end_version}, this "
                    "function will no longer use the global RNG."
                ) + cmn_msg
                warnings.warn(message, FutureWarning, stacklevel=2)

            return fun(*args, **kwargs)

        # Add the old parameter name to the function signature
        wrapped_signature = inspect.signature(fun)
        wrapper.__signature__ = wrapped_signature.replace(parameters=[
            *wrapped_signature.parameters.values(),
            inspect.Parameter(old_name, inspect.Parameter.KEYWORD_ONLY, default=None),
        ])

        if replace_doc:
            # Splice the canonical `rng` description into the wrapped
            # function's numpydoc "Parameters" section.
            doc = FunctionDoc(wrapper)
            parameter_names = [param.name for param in doc['Parameters']]
            if 'rng' in parameter_names:
                _type = "{None, int, `numpy.random.Generator`}, optional"
                _desc = _rng_desc.replace("{old_name}", old_name)
                old_doc = doc['Parameters'][parameter_names.index('rng')].desc
                # Keep whatever followed the first blank line of the old doc.
                old_doc_keep = old_doc[old_doc.index("") + 1:] if "" in old_doc else []
                new_doc = [_desc] + old_doc_keep
                _rng_parameter_doc = Parameter('rng', _type, new_doc)
                doc['Parameters'][parameter_names.index('rng')] = _rng_parameter_doc
                doc = str(doc).split("\n", 1)[1].lstrip(" \n")  # remove signature
                wrapper.__doc__ = str(doc)
        return wrapper

    return decorator
390
+
391
+
392
+ # copy-pasted from scikit-learn utils/validation.py
393
+ def check_random_state(seed):
394
+ """Turn `seed` into a `np.random.RandomState` instance.
395
+
396
+ Parameters
397
+ ----------
398
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
399
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
400
+ singleton is used.
401
+ If `seed` is an int, a new ``RandomState`` instance is used,
402
+ seeded with `seed`.
403
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
404
+ that instance is used.
405
+
406
+ Returns
407
+ -------
408
+ seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
409
+ Random number generator.
410
+
411
+ """
412
+ if seed is None or seed is np.random:
413
+ return np.random.mtrand._rand
414
+ if isinstance(seed, numbers.Integral | np.integer):
415
+ return np.random.RandomState(seed)
416
+ if isinstance(seed, np.random.RandomState | np.random.Generator):
417
+ return seed
418
+
419
+ raise ValueError(f"'{seed}' cannot be used to seed a numpy.random.RandomState"
420
+ " instance")
421
+
422
+
423
+ def _asarray_validated(a, check_finite=True,
424
+ sparse_ok=False, objects_ok=False, mask_ok=False,
425
+ as_inexact=False):
426
+ """
427
+ Helper function for SciPy argument validation.
428
+
429
+ Many SciPy linear algebra functions do support arbitrary array-like
430
+ input arguments. Examples of commonly unsupported inputs include
431
+ matrices containing inf/nan, sparse matrix representations, and
432
+ matrices with complicated elements.
433
+
434
+ Parameters
435
+ ----------
436
+ a : array_like
437
+ The array-like input.
438
+ check_finite : bool, optional
439
+ Whether to check that the input matrices contain only finite numbers.
440
+ Disabling may give a performance gain, but may result in problems
441
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
442
+ Default: True
443
+ sparse_ok : bool, optional
444
+ True if scipy sparse matrices are allowed.
445
+ objects_ok : bool, optional
446
+ True if arrays with dype('O') are allowed.
447
+ mask_ok : bool, optional
448
+ True if masked arrays are allowed.
449
+ as_inexact : bool, optional
450
+ True to convert the input array to a np.inexact dtype.
451
+
452
+ Returns
453
+ -------
454
+ ret : ndarray
455
+ The converted validated array.
456
+
457
+ """
458
+ if not sparse_ok:
459
+ if issparse(a):
460
+ msg = ('Sparse arrays/matrices are not supported by this function. '
461
+ 'Perhaps one of the `scipy.sparse.linalg` functions '
462
+ 'would work instead.')
463
+ raise ValueError(msg)
464
+ if not mask_ok:
465
+ if np.ma.isMaskedArray(a):
466
+ raise ValueError('masked arrays are not supported')
467
+ toarray = np.asarray_chkfinite if check_finite else np.asarray
468
+ a = toarray(a)
469
+ if not objects_ok:
470
+ if a.dtype is np.dtype('O'):
471
+ raise ValueError('object arrays are not supported')
472
+ if as_inexact:
473
+ if not np.issubdtype(a.dtype, np.inexact):
474
+ a = toarray(a, dtype=np.float64)
475
+ return a
476
+
477
+
478
+ def _validate_int(k, name, minimum=None):
479
+ """
480
+ Validate a scalar integer.
481
+
482
+ This function can be used to validate an argument to a function
483
+ that expects the value to be an integer. It uses `operator.index`
484
+ to validate the value (so, for example, k=2.0 results in a
485
+ TypeError).
486
+
487
+ Parameters
488
+ ----------
489
+ k : int
490
+ The value to be validated.
491
+ name : str
492
+ The name of the parameter.
493
+ minimum : int, optional
494
+ An optional lower bound.
495
+ """
496
+ try:
497
+ k = operator.index(k)
498
+ except TypeError:
499
+ raise TypeError(f'{name} must be an integer.') from None
500
+ if minimum is not None and k < minimum:
501
+ raise ValueError(f'{name} must be an integer not less '
502
+ f'than {minimum}') from None
503
+ return k
504
+
505
+
506
# Add a replacement for inspect.getfullargspec()/
# The version below is borrowed from Django,
# https://github.com/django/django/pull/4846.

# Note an inconsistency between inspect.getfullargspec(func) and
# inspect.signature(func). If `func` is a bound method, the latter does *not*
# list `self` as a first argument, while the former *does*.
# Hence, cook up a common ground replacement: `getfullargspec_no_self` which
# mimics `inspect.getfullargspec` but does not list `self`.
#
# This way, the caller code does not need to know whether it uses a legacy
# .getfullargspec or a bright and shiny .signature.

FullArgSpec = namedtuple('FullArgSpec',
                         ['args', 'varargs', 'varkw', 'defaults',
                          'kwonlyargs', 'kwonlydefaults', 'annotations'])


def getfullargspec_no_self(func):
    """inspect.getfullargspec replacement using inspect.signature.

    If func is a bound method, do not list the 'self' parameter.

    Parameters
    ----------
    func : callable
        A callable to inspect

    Returns
    -------
    fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
                              kwonlydefaults, annotations)

        NOTE: if the first argument of `func` is self, it is *not*, I repeat
        *not*, included in fullargspec.args.
        This is done for consistency between inspect.getargspec() under
        Python 2.x, and inspect.signature() under Python 3.x.

    """
    sig = wrapped_inspect_signature(func)
    # Positional parameters; `self` of a bound method is already absent
    # from `sig.parameters`.
    args = [
        p.name for p in sig.parameters.values()
        if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD,
                      inspect.Parameter.POSITIONAL_ONLY]
    ]
    # Name of the `*args`-style parameter, or None if there is none.
    varargs = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.VAR_POSITIONAL
    ]
    varargs = varargs[0] if varargs else None
    # Name of the `**kwargs`-style parameter, or None if there is none.
    varkw = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.VAR_KEYWORD
    ]
    varkw = varkw[0] if varkw else None
    # Defaults of positional-or-keyword parameters; None rather than an
    # empty tuple when there are none, matching `inspect.getfullargspec`.
    defaults = tuple(
        p.default for p in sig.parameters.values()
        if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
            p.default is not p.empty)
    ) or None
    kwonlyargs = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.KEYWORD_ONLY
    ]
    kwdefaults = {p.name: p.default for p in sig.parameters.values()
                  if p.kind == inspect.Parameter.KEYWORD_ONLY and
                  p.default is not p.empty}
    annotations = {p.name: p.annotation for p in sig.parameters.values()
                   if p.annotation is not p.empty}
    # `kwdefaults or None` likewise normalises an empty dict to None.
    return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
                       kwdefaults or None, annotations)
577
+
578
+
579
+ class _FunctionWrapper:
580
+ """
581
+ Object to wrap user's function, allowing picklability
582
+ """
583
+ def __init__(self, f, args):
584
+ self.f = f
585
+ self.args = [] if args is None else args
586
+
587
+ def __call__(self, x):
588
+ return self.f(x, *self.args)
589
+
590
+
591
+ class _ScalarFunctionWrapper:
592
+ """
593
+ Object to wrap scalar user function, allowing picklability
594
+ """
595
+ def __init__(self, f, args=None):
596
+ self.f = f
597
+ self.args = [] if args is None else args
598
+ self.nfev = 0
599
+
600
+ def __call__(self, x):
601
+ # Send a copy because the user may overwrite it.
602
+ # The user of this class might want `x` to remain unchanged.
603
+ fx = self.f(np.copy(x), *self.args)
604
+ self.nfev += 1
605
+
606
+ # Make sure the function returns a true scalar
607
+ if not np.isscalar(fx):
608
+ try:
609
+ fx = np.asarray(fx).item()
610
+ except (TypeError, ValueError) as e:
611
+ raise ValueError(
612
+ "The user-provided objective function "
613
+ "must return a scalar value."
614
+ ) from e
615
+ return fx
616
+
617
class MapWrapper:
    """
    Parallelisation wrapper for working with map-like callables, such as
    `multiprocessing.Pool.map`.

    Parameters
    ----------
    pool : int or map-like callable
        If `pool` is an integer, then it specifies the number of threads to
        use for parallelization. If ``int(pool) == 1``, then no parallel
        processing is used and the map builtin is used.
        If ``pool == -1``, then the pool will utilize all available CPUs.
        If `pool` is a map-like callable that follows the same
        calling sequence as the built-in map function, then this callable is
        used for parallelization.
    """
    def __init__(self, pool=1):
        # Default state: serial evaluation via the builtin `map`.
        # `_own_pool` records whether *we* created the pool and are therefore
        # responsible for shutting it down; the cleanup methods below are
        # no-ops otherwise.
        self.pool = None
        self._mapfunc = map
        self._own_pool = False

        if callable(pool):
            # User supplied a map-like callable; the caller owns its lifetime.
            self.pool = pool
            self._mapfunc = self.pool
        else:
            from multiprocessing import get_context, get_start_method

            method = get_start_method(allow_none=True)

            if method is None and os.name=='posix' and sys.version_info < (3, 14):
                # Python 3.13 and older used "fork" on posix, which can lead to
                # deadlocks. This backports that fix to older Python versions.
                method = 'forkserver'

            # user supplies a number
            if int(pool) == -1:
                # use as many processors as possible
                self.pool = get_context(method=method).Pool()
                self._mapfunc = self.pool.map
                self._own_pool = True
            elif int(pool) == 1:
                # Serial evaluation; keep the builtin `map` set up above.
                pass
            elif int(pool) > 1:
                # use the number of processors requested
                self.pool = get_context(method=method).Pool(processes=int(pool))
                self._mapfunc = self.pool.map
                self._own_pool = True
            else:
                raise RuntimeError("Number of workers specified must be -1,"
                                   " an int >= 1, or an object with a 'map' "
                                   "method")

    def __enter__(self):
        return self

    def terminate(self):
        # Only act on pools created (and therefore owned) by this wrapper.
        if self._own_pool:
            self.pool.terminate()

    def join(self):
        if self._own_pool:
            self.pool.join()

    def close(self):
        if self._own_pool:
            self.pool.close()

    def __exit__(self, exc_type, exc_value, traceback):
        # Shut down an owned pool when leaving the `with` block, regardless
        # of whether an exception occurred.
        if self._own_pool:
            self.pool.close()
            self.pool.terminate()

    def __call__(self, func, iterable):
        # only accept one iterable because that's all Pool.map accepts
        try:
            return self._mapfunc(func, iterable)
        except TypeError as e:
            # wrong number of arguments
            raise TypeError("The map-like callable must be of the"
                            " form f(func, iterable)") from e
697
+
698
+
699
def _workers_wrapper(func):
    """
    Decorator that manages setup/cleanup of a ``workers`` argument.

    The wrapped function receives ``workers`` already converted to a
    `MapWrapper` whose lifetime is handled here by a context manager,
    sparing the function itself from messy setup/tear-down code.
    """
    @functools.wraps(func)
    def inner(*args, **kwds):
        kwargs = dict(kwds)
        # A missing `workers` keyword or an explicit ``None`` both mean
        # serial evaluation with the builtin `map`.
        workers = kwargs.get('workers')
        if workers is None:
            workers = map

        with MapWrapper(workers) as mapper:
            kwargs['workers'] = mapper
            return func(*args, **kwargs)

    return inner
720
+
721
+
722
def rng_integers(gen, low, high=None, size=None, dtype='int64',
                 endpoint=False):
    """
    Return random integers from low (inclusive) to high (exclusive), or if
    endpoint=True, low (inclusive) to high (inclusive). Replaces
    `RandomState.randint` (with endpoint=False) and
    `RandomState.random_integers` (with endpoint=True).

    Return random integers from the "discrete uniform" distribution of the
    specified dtype. If high is None (the default), then results are from
    0 to low.

    Parameters
    ----------
    gen : {None, np.random.RandomState, np.random.Generator}
        Random number generator. If None, the np.random.RandomState
        singleton is used.
    low : int or array-like of ints
        Lowest (signed) integers to be drawn (or, if ``high is None``,
        this value is used for `high` and the lower bound is 0).
    high : int or array-like of ints, optional
        If provided, one above the largest (signed) integer to be drawn.
    size : array-like of ints, optional
        Output shape; default None returns a single value.
    dtype : {str, dtype}, optional
        Desired dtype of the result, by name (e.g. 'int64'). Default 'int64'.
    endpoint : bool, optional
        If True, sample from [low, high] instead of [low, high).
        Defaults to False.

    Returns
    -------
    out : int or ndarray of ints
        size-shaped array of random integers, or a single random int if
        `size` is not provided.
    """
    if isinstance(gen, np.random.Generator):
        # `Generator.integers` supports `endpoint` natively.
        return gen.integers(low, high=high, size=size, dtype=dtype,
                            endpoint=endpoint)

    if gen is None:
        # Fall back to the RandomState singleton used by np.random.
        gen = np.random.mtrand._rand

    if not endpoint:
        # Half-open interval [low, high), exactly `randint`'s contract.
        return gen.randint(low, high=high, size=size, dtype=dtype)

    # Closed interval [low, high]: shift the exclusive bound up by one.
    # `low`/`high` may be arrays, so build new values instead of mutating
    # in place.
    if high is None:
        return gen.randint(low + 1, size=size, dtype=dtype)
    return gen.randint(low, high=high + 1, size=size, dtype=dtype)
784
+
785
+
786
+ @contextmanager
787
+ def _fixed_default_rng(seed=1638083107694713882823079058616272161):
788
+ """Context with a fixed np.random.default_rng seed."""
789
+ orig_fun = np.random.default_rng
790
+ np.random.default_rng = lambda seed=seed: orig_fun(seed)
791
+ try:
792
+ yield
793
+ finally:
794
+ np.random.default_rng = orig_fun
795
+
796
+
797
@contextmanager
def ignore_warns(expected_warning, *, match=None):
    """Suppress warnings of category `expected_warning`, optionally only
    those whose message matches the regex `match`."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=match,
                                category=expected_warning)
        yield
802
+
803
+
804
+ def _rng_html_rewrite(func):
805
+ """Rewrite the HTML rendering of ``np.random.default_rng``.
806
+
807
+ This is intended to decorate
808
+ ``numpydoc.docscrape_sphinx.SphinxDocString._str_examples``.
809
+
810
+ Examples are only run by Sphinx when there are plot involved. Even so,
811
+ it does not change the result values getting printed.
812
+ """
813
+ # hexadecimal or number seed, case-insensitive
814
+ pattern = re.compile(r'np.random.default_rng\((0x[0-9A-F]+|\d+)\)', re.I)
815
+
816
+ def _wrapped(*args, **kwargs):
817
+ res = func(*args, **kwargs)
818
+ lines = [
819
+ re.sub(pattern, 'np.random.default_rng()', line)
820
+ for line in res
821
+ ]
822
+ return lines
823
+
824
+ return _wrapped
825
+
826
+
827
+ def _argmin(a, keepdims=False, axis=None):
828
+ """
829
+ argmin with a `keepdims` parameter.
830
+
831
+ See https://github.com/numpy/numpy/issues/8710
832
+
833
+ If axis is not None, a.shape[axis] must be greater than 0.
834
+ """
835
+ res = np.argmin(a, axis=axis)
836
+ if keepdims and axis is not None:
837
+ res = np.expand_dims(res, axis=axis)
838
+ return res
839
+
840
+
841
def _contains_nan(
    a: Array,
    nan_policy: Literal["propagate", "raise", "omit"] = "propagate",
    *,
    xp_omit_okay: bool = False,
    xp: ModuleType | None = None,
) -> Array | bool:
    """Return whether `a` contains NaN values, honouring `nan_policy`.

    Raises ``ValueError`` for an unknown policy or, with
    ``nan_policy='raise'``, when NaNs are present.  The result may be a
    0-d array rather than a Python bool for lazy/device backends.
    """
    # Regarding `xp_omit_okay`: Temporarily, while `_axis_nan_policy` does not
    # handle non-NumPy arrays, most functions that call `_contains_nan` want
    # it to raise an error if `nan_policy='omit'` and `xp` is not `np`.
    # Some functions support `nan_policy='omit'` natively, so setting this to
    # `True` prevents the error from being raised.
    policies = {"propagate", "raise", "omit"}
    if nan_policy not in policies:
        msg = f"nan_policy must be one of {policies}."
        raise ValueError(msg)

    # An empty array cannot contain NaNs.
    if xp_size(a) == 0:
        return False

    if xp is None:
        xp = array_namespace(a)

    if xp.isdtype(a.dtype, "real floating"):
        # Faster and less memory-intensive than xp.any(xp.isnan(a)), and unlike other
        # reductions, `max`/`min` won't return NaN unless there is a NaN in the data.
        contains_nan = xp.isnan(xp.max(a))
    elif xp.isdtype(a.dtype, "complex floating"):
        # Typically `real` and `imag` produce views; otherwise, `xp.any(xp.isnan(a))`
        # would be more efficient.
        contains_nan = xp.isnan(xp.max(xp.real(a))) | xp.isnan(xp.max(xp.imag(a)))
    elif is_numpy(xp) and np.issubdtype(a.dtype, object):
        contains_nan = False
        for el in a.ravel():
            # isnan doesn't work on non-numeric elements
            if np.issubdtype(type(el), np.number) and np.isnan(el):
                contains_nan = True
                break
    else:
        # Only `object` and `inexact` arrays can have NaNs
        return False

    # The implicit call to bool(contains_nan) must happen after testing
    # nan_policy to prevent lazy and device-bound xps from raising in the
    # default policy='propagate' case.
    if nan_policy == 'raise':
        if is_lazy_array(a):
            msg = "nan_policy='raise' is not supported for lazy arrays."
            raise TypeError(msg)
        if contains_nan:
            msg = "The input contains nan values"
            raise ValueError(msg)
    elif nan_policy == 'omit' and not xp_omit_okay and not is_numpy(xp):
        # Lazy backends cannot materialise the mask required for 'omit'.
        if is_lazy_array(a):
            msg = "nan_policy='omit' is not supported for lazy arrays."
            raise TypeError(msg)

    return contains_nan
899
+
900
+
901
+ def _rename_parameter(old_name, new_name, dep_version=None):
902
+ """
903
+ Generate decorator for backward-compatible keyword renaming.
904
+
905
+ Apply the decorator generated by `_rename_parameter` to functions with a
906
+ recently renamed parameter to maintain backward-compatibility.
907
+
908
+ After decoration, the function behaves as follows:
909
+ If only the new parameter is passed into the function, behave as usual.
910
+ If only the old parameter is passed into the function (as a keyword), raise
911
+ a DeprecationWarning if `dep_version` is provided, and behave as usual
912
+ otherwise.
913
+ If both old and new parameters are passed into the function, raise a
914
+ DeprecationWarning if `dep_version` is provided, and raise the appropriate
915
+ TypeError (function got multiple values for argument).
916
+
917
+ Parameters
918
+ ----------
919
+ old_name : str
920
+ Old name of parameter
921
+ new_name : str
922
+ New name of parameter
923
+ dep_version : str, optional
924
+ Version of SciPy in which old parameter was deprecated in the format
925
+ 'X.Y.Z'. If supplied, the deprecation message will indicate that
926
+ support for the old parameter will be removed in version 'X.Y+2.Z'
927
+
928
+ Notes
929
+ -----
930
+ Untested with functions that accept *args. Probably won't work as written.
931
+
932
+ """
933
+ def decorator(fun):
934
+ @functools.wraps(fun)
935
+ def wrapper(*args, **kwargs):
936
+ if old_name in kwargs:
937
+ if dep_version:
938
+ end_version = dep_version.split('.')
939
+ end_version[1] = str(int(end_version[1]) + 2)
940
+ end_version = '.'.join(end_version)
941
+ message = (f"Use of keyword argument `{old_name}` is "
942
+ f"deprecated and replaced by `{new_name}`. "
943
+ f"Support for `{old_name}` will be removed "
944
+ f"in SciPy {end_version}.")
945
+ warnings.warn(message, DeprecationWarning, stacklevel=2)
946
+ if new_name in kwargs:
947
+ message = (f"{fun.__name__}() got multiple values for "
948
+ f"argument now known as `{new_name}`")
949
+ raise TypeError(message)
950
+ kwargs[new_name] = kwargs.pop(old_name)
951
+ return fun(*args, **kwargs)
952
+ return wrapper
953
+ return decorator
954
+
955
+
956
+ def _rng_spawn(rng, n_children):
957
+ # spawns independent RNGs from a parent RNG
958
+ bg = rng._bit_generator
959
+ ss = bg._seed_seq
960
+ child_rngs = [np.random.Generator(type(bg)(child_ss))
961
+ for child_ss in ss.spawn(n_children)]
962
+ return child_rngs
963
+
964
+
965
def _get_nan(*data, shape=(), xp=None):
    # Return an array of NaNs with the given `shape`, with dtype/device
    # chosen to match `data` (promoted to a floating dtype).
    xp = array_namespace(*data) if xp is None else xp
    # Get NaN of appropriate dtype for data
    dtype = xp_result_type(*data, force_floating=True, xp=xp)
    device = xp_result_device(*data)
    res = xp.full(shape, xp.nan, dtype=dtype, device=device)
    if not shape:
        # shape == () means a scalar is wanted; index out the 0-d element.
        res = res[()]
    # whenever mdhaber/marray#89 is resolved, could just return `res`
    return res.data if is_marray(xp) else res
975
+
976
+
977
+ def normalize_axis_index(axis, ndim):
978
+ # Check if `axis` is in the correct range and normalize it
979
+ if axis < -ndim or axis >= ndim:
980
+ msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
981
+ raise AxisError(msg)
982
+
983
+ if axis < 0:
984
+ axis = axis + ndim
985
+ return axis
986
+
987
+
988
+ def _call_callback_maybe_halt(callback, res):
989
+ """Call wrapped callback; return True if algorithm should stop.
990
+
991
+ Parameters
992
+ ----------
993
+ callback : callable or None
994
+ A user-provided callback wrapped with `_wrap_callback`
995
+ res : OptimizeResult
996
+ Information about the current iterate
997
+
998
+ Returns
999
+ -------
1000
+ halt : bool
1001
+ True if minimization should stop
1002
+
1003
+ """
1004
+ if callback is None:
1005
+ return False
1006
+ try:
1007
+ callback(res)
1008
+ return False
1009
+ except StopIteration:
1010
+ callback.stop_iteration = True
1011
+ return True
1012
+
1013
+
1014
class _RichResult(dict):
    """ Container for multiple outputs with pretty-printing """
    def __getattr__(self, name):
        # Expose dict entries as attributes, so `res.x` works like `res['x']`.
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(name) from e

    # Attribute writes and deletes go straight through to the dict.
    __setattr__ = dict.__setitem__  # type: ignore[assignment]
    __delattr__ = dict.__delitem__  # type: ignore[assignment]

    def __repr__(self):
        # Preferred display order for well-known keys; anything not listed
        # sorts last (see `key` below). Subclasses may override via the
        # `_order_keys` entry.
        order_keys = ['message', 'success', 'status', 'fun', 'funl', 'x', 'xl',
                      'col_ind', 'nit', 'lower', 'upper', 'eqlin', 'ineqlin',
                      'converged', 'flag', 'function_calls', 'iterations',
                      'root']
        order_keys = getattr(self, '_order_keys', order_keys)
        # 'slack', 'con' are redundant with residuals
        # 'crossover_nit' is probably not interesting to most users
        omit_keys = {'slack', 'con', 'crossover_nit', '_order_keys'}

        def key(item):
            # Sort key: index into `order_keys`; unknown keys go last.
            try:
                return order_keys.index(item[0].lower())
            except ValueError:  # item not in list
                return np.inf

        def omit_redundant(items):
            # Drop keys that would only clutter the display.
            for item in items:
                if item[0] in omit_keys:
                    continue
                yield item

        def item_sorter(d):
            return sorted(omit_redundant(d.items()), key=key)

        if self.keys():
            return _dict_formatter(self, sorter=item_sorter)
        else:
            return self.__class__.__name__ + "()"

    def __dir__(self):
        # Support tab completion on the stored keys.
        return list(self.keys())
1057
+
1058
+
1059
+ def _indenter(s, n=0):
1060
+ """
1061
+ Ensures that lines after the first are indented by the specified amount
1062
+ """
1063
+ split = s.split("\n")
1064
+ indent = " "*n
1065
+ return ("\n" + indent).join(split)
1066
+
1067
+
1068
+ def _float_formatter_10(x):
1069
+ """
1070
+ Returns a string representation of a float with exactly ten characters
1071
+ """
1072
+ if np.isposinf(x):
1073
+ return " inf"
1074
+ elif np.isneginf(x):
1075
+ return " -inf"
1076
+ elif np.isnan(x):
1077
+ return " nan"
1078
+ return np.format_float_scientific(x, precision=3, pad_left=2, unique=False)
1079
+
1080
+
1081
def _dict_formatter(d, n=0, mplus=1, sorter=None):
    """
    Pretty printer for dictionaries

    `n` keeps track of the starting indentation;
    lines are indented by this much after a line break.
    `mplus` is additional left padding applied to keys
    `sorter` is a callable mapping a dict to an ordered iterable of
    ``(key, value)`` pairs (see `_RichResult.__repr__`).
    """
    if isinstance(d, dict):
        # Recurse: keys right-justified to a common width, values indented
        # to line up underneath.
        m = max(map(len, list(d.keys()))) + mplus  # width to print keys
        s = '\n'.join([k.rjust(m) + ': ' +  # right justified, width m
                       _indenter(_dict_formatter(v, m+n+2, 0, sorter), m+2)
                       for k, v in sorter(d)])  # +2 for ': '
    else:
        # By default, NumPy arrays print with linewidth=76. `n` is
        # the indent at which a line begins printing, so it is subtracted
        # from the default to avoid exceeding 76 characters total.
        # `edgeitems` is the number of elements to include before and after
        # ellipses when arrays are not shown in full.
        # `threshold` is the maximum number of elements for which an
        # array is shown in full.
        # These values tend to work well for use with OptimizeResult.
        with np.printoptions(linewidth=76-n, edgeitems=2, threshold=12,
                             formatter={'float_kind': _float_formatter_10}):
            s = str(d)
    return s
1107
+
1108
+
1109
# Docstring fragment appended to the Extended Summary of functions wrapped
# by `_apply_over_batch`, documenting the batch-dimension semantics.
_batch_note = """
The documentation is written assuming array arguments are of specified
"core" shapes. However, array argument(s) of this function may have additional
"batch" dimensions prepended to the core shape. In this case, the array is treated
as a batch of lower-dimensional slices; see :ref:`linalg_batch` for details.
Note that calls with zero-size batches are unsupported and will raise a ``ValueError``.
"""
1116
+
1117
+
1118
def _apply_over_batch(*argdefs):
    """
    Factory for decorator that applies a function over batched arguments.

    Array arguments may have any number of core dimensions (typically 0,
    1, or 2) and any broadcastable batch shapes. There may be any
    number of array outputs of any number of dimensions. Assumptions
    right now - which are satisfied by all functions of interest in `linalg` -
    are that all array inputs are consecutive keyword or positional arguments,
    and that the wrapped function returns either a single array or a tuple of
    arrays. It's only as general as it needs to be right now - it can be extended.

    Parameters
    ----------
    *argdefs : tuple of (str, int)
        Definitions of array arguments: the keyword name of the argument, and
        the number of core dimensions. The number may also be the string
        "1|2", meaning "2 core dimensions when the argument has 2 or more
        dimensions, otherwise 1" (used by `solve` and friends).

    Example:
    --------
    `linalg.eig` accepts two matrices as the first two arguments `a` and `b`, where
    `b` is optional, and returns one array or a tuple of arrays, depending on the
    values of other positional or keyword arguments. To generate a wrapper that applies
    the function over batches of `a` and optionally `b` :

    >>> _apply_over_batch(('a', 2), ('b', 2))
    """
    names, ndims = list(zip(*argdefs))
    n_arrays = len(names)

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            args = list(args)

            # Ensure all arrays in `arrays`, other arguments in `other_args`/`kwargs`
            arrays, other_args = args[:n_arrays], args[n_arrays:]
            for i, name in enumerate(names):
                if name in kwargs:
                    if i + 1 <= len(args):
                        # Passed both positionally and by keyword.
                        raise ValueError(f'{f.__name__}() got multiple values '
                                         f'for argument `{name}`.')
                    else:
                        arrays.append(kwargs.pop(name))

            xp = array_namespace(*arrays)

            # Determine core and batch shapes
            batch_shapes = []
            core_shapes = []
            for i, (array, ndim) in enumerate(zip(arrays, ndims)):
                array = None if array is None else xp.asarray(array)
                shape = () if array is None else array.shape

                if ndim == "1|2":  # special case for `solve`, etc.
                    ndim = 2 if array.ndim >= 2 else 1

                arrays[i] = array
                # Leading dimensions are the batch, trailing `ndim` the core.
                batch_shapes.append(shape[:-ndim] if ndim > 0 else shape)
                core_shapes.append(shape[-ndim:] if ndim > 0 else ())

            # Early exit if call is not batched
            if not any(batch_shapes):
                return f(*arrays, *other_args, **kwargs)

            # Determine broadcasted batch shape
            batch_shape = np.broadcast_shapes(*batch_shapes)  # Gives OK error message

            # We can't support zero-size batches right now because without data with
            # which to call the function, the decorator doesn't even know the *number*
            # of outputs, let alone their core shapes or dtypes.
            if math.prod(batch_shape) == 0:
                message = f'`{f.__name__}` does not support zero-size batches.'
                raise ValueError(message)

            # Broadcast arrays to appropriate shape
            for i, (array, core_shape) in enumerate(zip(arrays, core_shapes)):
                if array is None:
                    continue
                arrays[i] = xp.broadcast_to(array, batch_shape + core_shape)

            # Main loop: call `f` once per batch index on the core slices.
            results = []
            for index in np.ndindex(batch_shape):
                result = f(*((array[index] if array is not None else None)
                             for array in arrays), *other_args, **kwargs)
                # Assume `result` is either a tuple or single array. This is easily
                # generalized by allowing the contributor to pass an `unpack_result`
                # callable to the decorator factory.
                result = (result,) if not isinstance(result, tuple) else result
                results.append(result)
            # Transpose: list-of-call-results -> per-output lists.
            results = list(zip(*results))

            # Reshape results: stack along a new batch axis, then unflatten it.
            for i, result in enumerate(results):
                result = xp.stack(result)
                core_shape = result.shape[1:]
                results[i] = xp.reshape(result, batch_shape + core_shape)

            # Assume `result` should be a single array if there is only one element or
            # a `tuple` otherwise. This is easily generalized by allowing the
            # contributor to pass an `pack_result` callable to the decorator factory.
            return results[0] if len(results) == 1 else results

        # Append the shared batching note to the wrapped function's docstring.
        doc = FunctionDoc(wrapper)
        doc['Extended Summary'].append(_batch_note.rstrip())
        wrapper.__doc__ = str(doc).split("\n", 1)[1].lstrip(" \n")  # remove signature

        return wrapper
    return decorator
1228
+
1229
+
1230
def np_vecdot(x1, x2, /, *, axis=-1):
    """Vector dot product of `x1` and `x2` along `axis`.

    `np.vecdot` has advantages (e.g. see gh-22462), so let's use it when
    available (NumPy >= 2.0). As functions are translated to the Array API,
    `np_vecdot` can be replaced with `xp.vecdot`.

    Parameters
    ----------
    x1, x2 : array_like
        Input arrays; their shapes must broadcast against each other.
    axis : int, optional
        Axis over which the product is summed. Default: -1.

    Returns
    -------
    ndarray or scalar
        The dot product, with `axis` reduced away.
    """
    # Compare the major version numerically: the previous check,
    # `np.__version__ > "2.0"`, compared strings lexicographically and would
    # misclassify a future NumPy "10.x" as older than "2.0".
    major = int(np.__version__.partition('.')[0])
    if major >= 2:
        return np.vecdot(x1, x2, axis=axis)
    else:
        # of course there are other fancy ways of doing this (e.g. `einsum`)
        # but let's keep it simple since it's temporary
        return np.sum(x1 * x2, axis=axis)
1240
+
1241
+
1242
+ def _dedent_for_py313(s):
1243
+ """Apply textwrap.dedent to s for Python versions 3.13 or later."""
1244
+ return s if sys.version_info < (3, 13) else textwrap.dedent(s)
1245
+
1246
+
1247
def broadcastable(shape_a: tuple[int, ...], shape_b: tuple[int, ...]) -> bool:
    """Check if two shapes are broadcastable."""
    # Compare trailing dimensions; `zip` stops at the shorter shape, and
    # unmatched leading dimensions never conflict under broadcasting rules.
    for m, n in zip(shape_a[::-1], shape_b[::-1]):
        if m != n and m != 1 and n != 1:
            return False
    return True
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/deprecation.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from inspect import Parameter, signature
2
+ import functools
3
+ import warnings
4
+ from importlib import import_module
5
+ from scipy._lib._docscrape import FunctionDoc
6
+
7
+
8
__all__ = ["_deprecated"]


# Object to use as default value for arguments to be deprecated. This should
# be used over 'None' as the user could pass 'None' as a positional argument.
_NoValue = object()
14
+
15
def _sub_module_deprecation(*, sub_package, module, private_modules, all,
                            attribute, correct_module=None, dep_version="1.16.0"):
    """Helper function for deprecating modules that are public but were
    intended to be private.

    Parameters
    ----------
    sub_package : str
        Subpackage the module belongs to eg. stats
    module : str
        Public but intended private module to deprecate
    private_modules : list
        Private replacement(s) for `module`; should contain the
        content of ``all``, possibly spread over several modules.
    all : list
        ``__all__`` belonging to `module`
    attribute : str
        The attribute in `module` being accessed
    correct_module : str, optional
        Module in `sub_package` that `attribute` should be imported from.
        Default is that `attribute` should be imported from ``scipy.sub_package``.
    dep_version : str, optional
        Version in which deprecated attributes will be removed.
    """
    if correct_module is not None:
        correct_import = f"scipy.{sub_package}.{correct_module}"
    else:
        correct_import = f"scipy.{sub_package}"

    # An attribute that was never part of the deprecated namespace gets a
    # plain AttributeError rather than a deprecation warning.
    if attribute not in all:
        raise AttributeError(
            f"`scipy.{sub_package}.{module}` has no attribute `{attribute}`; "
            f"furthermore, `scipy.{sub_package}.{module}` is deprecated "
            f"and will be removed in SciPy 2.0.0."
        )

    # Probe whether the attribute has a public home; that decides which of
    # the two deprecation messages below is emitted.
    attr = getattr(import_module(correct_import), attribute, None)

    if attr is not None:
        message = (
            f"Please import `{attribute}` from the `{correct_import}` namespace; "
            f"the `scipy.{sub_package}.{module}` namespace is deprecated "
            f"and will be removed in SciPy 2.0.0."
        )
    else:
        message = (
            f"`scipy.{sub_package}.{module}.{attribute}` is deprecated along with "
            f"the `scipy.{sub_package}.{module}` namespace. "
            f"`scipy.{sub_package}.{module}.{attribute}` will be removed "
            f"in SciPy {dep_version}, and the `scipy.{sub_package}.{module}` namespace "
            f"will be removed in SciPy 2.0.0."
        )

    warnings.warn(message, category=DeprecationWarning, stacklevel=3)

    # NOTE(review): the loop variable shadows the `module` parameter; this is
    # harmless here because the parameter is not used after the warning above.
    for module in private_modules:
        try:
            return getattr(import_module(f"scipy.{sub_package}.{module}"), attribute)
        except AttributeError as e:
            # still raise an error if the attribute isn't in any of the expected
            # private modules
            if module == private_modules[-1]:
                raise e
            continue
+
80
+
81
+ def _deprecated(msg, stacklevel=2):
82
+ """Deprecate a function by emitting a warning on use."""
83
+ def wrap(fun):
84
+ if isinstance(fun, type):
85
+ warnings.warn(
86
+ f"Trying to deprecate class {fun!r}",
87
+ category=RuntimeWarning, stacklevel=2)
88
+ return fun
89
+
90
+ @functools.wraps(fun)
91
+ def call(*args, **kwargs):
92
+ warnings.warn(msg, category=DeprecationWarning,
93
+ stacklevel=stacklevel)
94
+ return fun(*args, **kwargs)
95
+ call.__doc__ = fun.__doc__
96
+ return call
97
+
98
+ return wrap
99
+
100
+
101
+ class _DeprecationHelperStr:
102
+ """
103
+ Helper class used by deprecate_cython_api
104
+ """
105
+ def __init__(self, content, message):
106
+ self._content = content
107
+ self._message = message
108
+
109
+ def __hash__(self):
110
+ return hash(self._content)
111
+
112
+ def __eq__(self, other):
113
+ res = (self._content == other)
114
+ if res:
115
+ warnings.warn(self._message, category=DeprecationWarning,
116
+ stacklevel=2)
117
+ return res
118
+
119
+
120
+ def deprecate_cython_api(module, routine_name, new_name=None, message=None):
121
+ """
122
+ Deprecate an exported cdef function in a public Cython API module.
123
+
124
+ Only functions can be deprecated; typedefs etc. cannot.
125
+
126
+ Parameters
127
+ ----------
128
+ module : module
129
+ Public Cython API module (e.g. scipy.linalg.cython_blas).
130
+ routine_name : str
131
+ Name of the routine to deprecate. May also be a fused-type
132
+ routine (in which case its all specializations are deprecated).
133
+ new_name : str
134
+ New name to include in the deprecation warning message
135
+ message : str
136
+ Additional text in the deprecation warning message
137
+
138
+ Examples
139
+ --------
140
+ Usually, this function would be used in the top-level of the
141
+ module ``.pyx`` file:
142
+
143
+ >>> from scipy._lib.deprecation import deprecate_cython_api
144
+ >>> import scipy.linalg.cython_blas as mod
145
+ >>> deprecate_cython_api(mod, "dgemm", "dgemm_new",
146
+ ... message="Deprecated in Scipy 1.5.0")
147
+ >>> del deprecate_cython_api, mod
148
+
149
+ After this, Cython modules that use the deprecated function emit a
150
+ deprecation warning when they are imported.
151
+
152
+ """
153
+ old_name = f"{module.__name__}.{routine_name}"
154
+
155
+ if new_name is None:
156
+ depdoc = f"`{old_name}` is deprecated!"
157
+ else:
158
+ depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!"
159
+
160
+ if message is not None:
161
+ depdoc += "\n" + message
162
+
163
+ d = module.__pyx_capi__
164
+
165
+ # Check if the function is a fused-type function with a mangled name
166
+ j = 0
167
+ has_fused = False
168
+ while True:
169
+ fused_name = f"__pyx_fuse_{j}{routine_name}"
170
+ if fused_name in d:
171
+ has_fused = True
172
+ d[_DeprecationHelperStr(fused_name, depdoc)] = d.pop(fused_name)
173
+ j += 1
174
+ else:
175
+ break
176
+
177
+ # If not, apply deprecation to the named routine
178
+ if not has_fused:
179
+ d[_DeprecationHelperStr(routine_name, depdoc)] = d.pop(routine_name)
180
+
181
+
182
+ # taken from scikit-learn, see
183
+ # https://github.com/scikit-learn/scikit-learn/blob/1.3.0/sklearn/utils/validation.py#L38
184
+ def _deprecate_positional_args(func=None, *, version=None,
185
+ deprecated_args=None, custom_message=""):
186
+ """Decorator for methods that issues warnings for positional arguments.
187
+
188
+ Using the keyword-only argument syntax in pep 3102, arguments after the
189
+ * will issue a warning when passed as a positional argument.
190
+
191
+ Parameters
192
+ ----------
193
+ func : callable, default=None
194
+ Function to check arguments on.
195
+ version : callable, default=None
196
+ The version when positional arguments will result in error.
197
+ deprecated_args : set of str, optional
198
+ Arguments to deprecate - whether passed by position or keyword.
199
+ custom_message : str, optional
200
+ Custom message to add to deprecation warning and documentation.
201
+ """
202
+ if version is None:
203
+ msg = "Need to specify a version where signature will be changed"
204
+ raise ValueError(msg)
205
+
206
+ deprecated_args = set() if deprecated_args is None else set(deprecated_args)
207
+
208
+ def _inner_deprecate_positional_args(f):
209
+ sig = signature(f)
210
+ kwonly_args = []
211
+ all_args = []
212
+
213
+ for name, param in sig.parameters.items():
214
+ if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
215
+ all_args.append(name)
216
+ elif param.kind == Parameter.KEYWORD_ONLY:
217
+ kwonly_args.append(name)
218
+
219
+ def warn_deprecated_args(kwargs):
220
+ intersection = deprecated_args.intersection(kwargs)
221
+ if intersection:
222
+ message = (f"Arguments {intersection} are deprecated, whether passed "
223
+ "by position or keyword. They will be removed in SciPy "
224
+ f"{version}. ")
225
+ message += custom_message
226
+ warnings.warn(message, category=DeprecationWarning, stacklevel=3)
227
+
228
+ @functools.wraps(f)
229
+ def inner_f(*args, **kwargs):
230
+
231
+ extra_args = len(args) - len(all_args)
232
+ if extra_args <= 0:
233
+ warn_deprecated_args(kwargs)
234
+ return f(*args, **kwargs)
235
+
236
+ # extra_args > 0
237
+ kwonly_extra_args = set(kwonly_args[:extra_args]) - deprecated_args
238
+ args_msg = ", ".join(kwonly_extra_args)
239
+ warnings.warn(
240
+ (
241
+ f"You are passing as positional arguments: {args_msg}. "
242
+ "Please change your invocation to use keyword arguments. "
243
+ f"From SciPy {version}, passing these as positional "
244
+ "arguments will result in an error."
245
+ ),
246
+ DeprecationWarning,
247
+ stacklevel=2,
248
+ )
249
+ kwargs.update(zip(sig.parameters, args))
250
+ warn_deprecated_args(kwargs)
251
+ return f(**kwargs)
252
+
253
+ doc = FunctionDoc(inner_f)
254
+ kwonly_extra_args = set(kwonly_args) - deprecated_args
255
+ admonition = f"""
256
+ .. deprecated:: {version}
257
+ Use of argument(s) ``{kwonly_extra_args}`` by position is deprecated; beginning in
258
+ SciPy {version}, these will be keyword-only. """
259
+ if deprecated_args:
260
+ admonition += (f"Argument(s) ``{deprecated_args}`` are deprecated, whether "
261
+ "passed by position or keyword; they will be removed in "
262
+ f"SciPy {version}. ")
263
+ admonition += custom_message
264
+ doc['Extended Summary'] += [admonition]
265
+
266
+ doc = str(doc).split("\n", 1)[1].lstrip(" \n") # remove signature
267
+ inner_f.__doc__ = str(doc)
268
+
269
+ return inner_f
270
+
271
+ if func is not None:
272
+ return _inner_deprecate_positional_args(func)
273
+
274
+ return _inner_deprecate_positional_args
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/doccer.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utilities to allow inserting docstring fragments for common
2
+ parameters into function and method docstrings."""
3
+
4
+ from collections.abc import Callable, Iterable, Mapping
5
+ from typing import Protocol, TypeVar
6
+ import sys
7
+
8
+ __all__ = [
9
+ "docformat",
10
+ "inherit_docstring_from",
11
+ "indentcount_lines",
12
+ "filldoc",
13
+ "unindent_dict",
14
+ "unindent_string",
15
+ "extend_notes_in_docstring",
16
+ "replace_notes_in_docstring",
17
+ "doc_replace",
18
+ ]
19
+
20
+ _F = TypeVar("_F", bound=Callable[..., object])
21
+
22
+
23
+ class Decorator(Protocol):
24
+ """A decorator of a function."""
25
+
26
+ def __call__(self, func: _F, /) -> _F: ...
27
+
28
+
29
+ def docformat(docstring: str, docdict: Mapping[str, str] | None = None) -> str:
30
+ """Fill a function docstring from variables in dictionary.
31
+
32
+ Adapt the indent of the inserted docs
33
+
34
+ Parameters
35
+ ----------
36
+ docstring : str
37
+ A docstring from a function, possibly with dict formatting strings.
38
+ docdict : dict[str, str], optional
39
+ A dictionary with keys that match the dict formatting strings
40
+ and values that are docstring fragments to be inserted. The
41
+ indentation of the inserted docstrings is set to match the
42
+ minimum indentation of the ``docstring`` by adding this
43
+ indentation to all lines of the inserted string, except the
44
+ first.
45
+
46
+ Returns
47
+ -------
48
+ docstring : str
49
+ string with requested ``docdict`` strings inserted.
50
+
51
+ Examples
52
+ --------
53
+ >>> docformat(' Test string with %(value)s', {'value':'inserted value'})
54
+ ' Test string with inserted value'
55
+ >>> docstring = 'First line\\n Second line\\n %(value)s'
56
+ >>> inserted_string = "indented\\nstring"
57
+ >>> docdict = {'value': inserted_string}
58
+ >>> docformat(docstring, docdict)
59
+ 'First line\\n Second line\\n indented\\n string'
60
+ """
61
+ if not docstring:
62
+ return docstring
63
+ if docdict is None:
64
+ docdict = {}
65
+ if not docdict:
66
+ return docstring
67
+ lines = docstring.expandtabs().splitlines()
68
+ # Find the minimum indent of the main docstring, after first line
69
+ if len(lines) < 2:
70
+ icount = 0
71
+ else:
72
+ icount = indentcount_lines(lines[1:])
73
+ indent = " " * icount
74
+ # Insert this indent to dictionary docstrings
75
+ indented = {}
76
+ for name, dstr in docdict.items():
77
+ lines = dstr.expandtabs().splitlines()
78
+ indented[name] = ("\n" + indent).join(lines)
79
+ return docstring % indented
80
+
81
+
82
+ def inherit_docstring_from(cls: object) -> Decorator:
83
+ """This decorator modifies the decorated function's docstring by
84
+ replacing occurrences of '%(super)s' with the docstring of the
85
+ method of the same name from the class `cls`.
86
+
87
+ If the decorated method has no docstring, it is simply given the
88
+ docstring of `cls`s method.
89
+
90
+ Parameters
91
+ ----------
92
+ cls : type or object
93
+ A class with a method with the same name as the decorated method.
94
+ The docstring of the method in this class replaces '%(super)s' in the
95
+ docstring of the decorated method.
96
+
97
+ Returns
98
+ -------
99
+ decfunc : function
100
+ The decorator function that modifies the __doc__ attribute
101
+ of its argument.
102
+
103
+ Examples
104
+ --------
105
+ In the following, the docstring for Bar.func created using the
106
+ docstring of `Foo.func`.
107
+
108
+ >>> class Foo:
109
+ ... def func(self):
110
+ ... '''Do something useful.'''
111
+ ... return
112
+ ...
113
+ >>> class Bar(Foo):
114
+ ... @inherit_docstring_from(Foo)
115
+ ... def func(self):
116
+ ... '''%(super)s
117
+ ... Do it fast.
118
+ ... '''
119
+ ... return
120
+ ...
121
+ >>> b = Bar()
122
+ >>> b.func.__doc__
123
+ 'Do something useful.\n Do it fast.\n '
124
+ """
125
+
126
+ def _doc(func: _F) -> _F:
127
+ cls_docstring = getattr(cls, func.__name__).__doc__
128
+ func_docstring = func.__doc__
129
+ if func_docstring is None:
130
+ func.__doc__ = cls_docstring
131
+ else:
132
+ new_docstring = func_docstring % dict(super=cls_docstring)
133
+ func.__doc__ = new_docstring
134
+ return func
135
+
136
+ return _doc
137
+
138
+
139
+ def extend_notes_in_docstring(cls: object, notes: str) -> Decorator:
140
+ """This decorator replaces the decorated function's docstring
141
+ with the docstring from corresponding method in `cls`.
142
+ It extends the 'Notes' section of that docstring to include
143
+ the given `notes`.
144
+
145
+ Parameters
146
+ ----------
147
+ cls : type or object
148
+ A class with a method with the same name as the decorated method.
149
+ The docstring of the method in this class replaces the docstring of the
150
+ decorated method.
151
+ notes : str
152
+ Additional notes to append to the 'Notes' section of the docstring.
153
+
154
+ Returns
155
+ -------
156
+ decfunc : function
157
+ The decorator function that modifies the __doc__ attribute
158
+ of its argument.
159
+ """
160
+
161
+ def _doc(func: _F) -> _F:
162
+ cls_docstring = getattr(cls, func.__name__).__doc__
163
+ # If python is called with -OO option,
164
+ # there is no docstring
165
+ if cls_docstring is None:
166
+ return func
167
+ end_of_notes = cls_docstring.find(" References\n")
168
+ if end_of_notes == -1:
169
+ end_of_notes = cls_docstring.find(" Examples\n")
170
+ if end_of_notes == -1:
171
+ end_of_notes = len(cls_docstring)
172
+ func.__doc__ = (
173
+ cls_docstring[:end_of_notes] + notes + cls_docstring[end_of_notes:]
174
+ )
175
+ return func
176
+
177
+ return _doc
178
+
179
+
180
+ def replace_notes_in_docstring(cls: object, notes: str) -> Decorator:
181
+ """This decorator replaces the decorated function's docstring
182
+ with the docstring from corresponding method in `cls`.
183
+ It replaces the 'Notes' section of that docstring with
184
+ the given `notes`.
185
+
186
+ Parameters
187
+ ----------
188
+ cls : type or object
189
+ A class with a method with the same name as the decorated method.
190
+ The docstring of the method in this class replaces the docstring of the
191
+ decorated method.
192
+ notes : str
193
+ The notes to replace the existing 'Notes' section with.
194
+
195
+ Returns
196
+ -------
197
+ decfunc : function
198
+ The decorator function that modifies the __doc__ attribute
199
+ of its argument.
200
+ """
201
+
202
+ def _doc(func: _F) -> _F:
203
+ cls_docstring = getattr(cls, func.__name__).__doc__
204
+ notes_header = " Notes\n -----\n"
205
+ # If python is called with -OO option,
206
+ # there is no docstring
207
+ if cls_docstring is None:
208
+ return func
209
+ start_of_notes = cls_docstring.find(notes_header)
210
+ end_of_notes = cls_docstring.find(" References\n")
211
+ if end_of_notes == -1:
212
+ end_of_notes = cls_docstring.find(" Examples\n")
213
+ if end_of_notes == -1:
214
+ end_of_notes = len(cls_docstring)
215
+ func.__doc__ = (
216
+ cls_docstring[: start_of_notes + len(notes_header)]
217
+ + notes
218
+ + cls_docstring[end_of_notes:]
219
+ )
220
+ return func
221
+
222
+ return _doc
223
+
224
+
225
+ def indentcount_lines(lines: Iterable[str]) -> int:
226
+ """Minimum indent for all lines in line list
227
+
228
+ Parameters
229
+ ----------
230
+ lines : Iterable[str]
231
+ The lines to find the minimum indent of.
232
+
233
+ Returns
234
+ -------
235
+ indent : int
236
+ The minimum indent.
237
+
238
+
239
+ Examples
240
+ --------
241
+ >>> lines = [' one', ' two', ' three']
242
+ >>> indentcount_lines(lines)
243
+ 1
244
+ >>> lines = []
245
+ >>> indentcount_lines(lines)
246
+ 0
247
+ >>> lines = [' one']
248
+ >>> indentcount_lines(lines)
249
+ 1
250
+ >>> indentcount_lines([' '])
251
+ 0
252
+ """
253
+ indentno = sys.maxsize
254
+ for line in lines:
255
+ stripped = line.lstrip()
256
+ if stripped:
257
+ indentno = min(indentno, len(line) - len(stripped))
258
+ if indentno == sys.maxsize:
259
+ return 0
260
+ return indentno
261
+
262
+
263
+ def filldoc(docdict: Mapping[str, str], unindent_params: bool = True) -> Decorator:
264
+ """Return docstring decorator using docdict variable dictionary.
265
+
266
+ Parameters
267
+ ----------
268
+ docdict : dict[str, str]
269
+ A dictionary containing name, docstring fragment pairs.
270
+ unindent_params : bool, optional
271
+ If True, strip common indentation from all parameters in docdict.
272
+ Default is False.
273
+
274
+ Returns
275
+ -------
276
+ decfunc : function
277
+ The decorator function that applies dictionary to its
278
+ argument's __doc__ attribute.
279
+ """
280
+ if unindent_params:
281
+ docdict = unindent_dict(docdict)
282
+
283
+ def decorate(func: _F) -> _F:
284
+ # __doc__ may be None for optimized Python (-OO)
285
+ doc = func.__doc__ or ""
286
+ func.__doc__ = docformat(doc, docdict)
287
+ return func
288
+
289
+ return decorate
290
+
291
+
292
+ def unindent_dict(docdict: Mapping[str, str]) -> dict[str, str]:
293
+ """Unindent all strings in a docdict.
294
+
295
+ Parameters
296
+ ----------
297
+ docdict : dict[str, str]
298
+ A dictionary with string values to unindent.
299
+
300
+ Returns
301
+ -------
302
+ docdict : dict[str, str]
303
+ The `docdict` dictionary but each of its string values are unindented.
304
+ """
305
+ can_dict: dict[str, str] = {}
306
+ for name, dstr in docdict.items():
307
+ can_dict[name] = unindent_string(dstr)
308
+ return can_dict
309
+
310
+
311
+ def unindent_string(docstring: str) -> str:
312
+ """Set docstring to minimum indent for all lines, including first.
313
+
314
+ Parameters
315
+ ----------
316
+ docstring : str
317
+ The input docstring to unindent.
318
+
319
+ Returns
320
+ -------
321
+ docstring : str
322
+ The unindented docstring.
323
+
324
+ Examples
325
+ --------
326
+ >>> unindent_string(' two')
327
+ 'two'
328
+ >>> unindent_string(' two\\n three')
329
+ 'two\\n three'
330
+ """
331
+ lines = docstring.expandtabs().splitlines()
332
+ icount = indentcount_lines(lines)
333
+ if icount == 0:
334
+ return docstring
335
+ return "\n".join([line[icount:] for line in lines])
336
+
337
+
338
+ def doc_replace(obj: object, oldval: str, newval: str) -> Decorator:
339
+ """Decorator to take the docstring from obj, with oldval replaced by newval
340
+
341
+ Equivalent to ``func.__doc__ = obj.__doc__.replace(oldval, newval)``
342
+
343
+ Parameters
344
+ ----------
345
+ obj : object
346
+ A class or object whose docstring will be used as the basis for the
347
+ replacement operation.
348
+ oldval : str
349
+ The string to search for in the docstring.
350
+ newval : str
351
+ The string to replace `oldval` with in the docstring.
352
+
353
+ Returns
354
+ -------
355
+ decfunc : function
356
+ A decorator function that replaces occurrences of `oldval` with `newval`
357
+ in the docstring of the decorated function.
358
+ """
359
+ # __doc__ may be None for optimized Python (-OO)
360
+ doc = (obj.__doc__ or "").replace(oldval, newval)
361
+
362
+ def inner(func: _F) -> _F:
363
+ func.__doc__ = doc
364
+ return func
365
+
366
+ return inner
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/messagestream.cpython-312-x86_64-linux-gnu.so ADDED
Binary file (83.3 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/uarray.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """`uarray` provides functions for generating multimethods that dispatch to
2
+ multiple different backends
3
+
4
+ This should be imported, rather than `_uarray` so that an installed version could
5
+ be used instead, if available. This means that users can call
6
+ `uarray.set_backend` directly instead of going through SciPy.
7
+
8
+ """
9
+
10
+
11
+ # Prefer an installed version of uarray, if available
12
+ try:
13
+ import uarray as _uarray
14
+ except ImportError:
15
+ _has_uarray = False
16
+ else:
17
+ from scipy._lib._pep440 import Version as _Version
18
+
19
+ _has_uarray = _Version(_uarray.__version__) >= _Version("0.8")
20
+ del _uarray
21
+ del _Version
22
+
23
+
24
+ if _has_uarray:
25
+ from uarray import * # noqa: F403
26
+ from uarray import _Function
27
+ else:
28
+ from ._uarray import * # noqa: F403
29
+ from ._uarray import _Function # noqa: F401
30
+
31
+ del _has_uarray
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/cluster/__init__.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =========================================
3
+ Clustering package (:mod:`scipy.cluster`)
4
+ =========================================
5
+
6
+ .. currentmodule:: scipy.cluster
7
+
8
+ Clustering algorithms are useful in information theory, target detection,
9
+ communications, compression, and other areas. The `vq` module only
10
+ supports vector quantization and the k-means algorithms.
11
+
12
+ The `hierarchy` module provides functions for hierarchical and
13
+ agglomerative clustering. Its features include generating hierarchical
14
+ clusters from distance matrices,
15
+ calculating statistics on clusters, cutting linkages
16
+ to generate flat clusters, and visualizing clusters with dendrograms.
17
+
18
+ .. toctree::
19
+ :maxdepth: 1
20
+
21
+ cluster.vq
22
+ cluster.hierarchy
23
+
24
+ """
25
+ __all__ = ['vq', 'hierarchy']
26
+
27
+ from . import vq, hierarchy
28
+
29
+ from scipy._lib._testutils import PytestTester
30
+ test = PytestTester(__name__)
31
+ del PytestTester
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/cluster/hierarchy.py ADDED
The diff for this file is too large to render. See raw diff
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/cluster/vq.py ADDED
@@ -0,0 +1,832 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
3
+ ====================================================================
4
+
5
+ Provides routines for k-means clustering, generating code books
6
+ from k-means models and quantizing vectors by comparing them with
7
+ centroids in a code book.
8
+
9
+ .. autosummary::
10
+ :toctree: generated/
11
+
12
+ whiten -- Normalize a group of observations so each feature has unit variance
13
+ vq -- Calculate code book membership of a set of observation vectors
14
+ kmeans -- Perform k-means on a set of observation vectors forming k clusters
15
+ kmeans2 -- A different implementation of k-means with more methods
16
+ -- for initializing centroids
17
+
18
+ Background information
19
+ ----------------------
20
+ The k-means algorithm takes as input the number of clusters to
21
+ generate, k, and a set of observation vectors to cluster. It
22
+ returns a set of centroids, one for each of the k clusters. An
23
+ observation vector is classified with the cluster number or
24
+ centroid index of the centroid closest to it.
25
+
26
+ A vector v belongs to cluster i if it is closer to centroid i than
27
+ any other centroid. If v belongs to i, we say centroid i is the
28
+ dominating centroid of v. The k-means algorithm tries to
29
+ minimize distortion, which is defined as the sum of the squared distances
30
+ between each observation vector and its dominating centroid.
31
+ The minimization is achieved by iteratively reclassifying
32
+ the observations into clusters and recalculating the centroids until
33
+ a configuration is reached in which the centroids are stable. One can
34
+ also define a maximum number of iterations.
35
+
36
+ Since vector quantization is a natural application for k-means,
37
+ information theory terminology is often used. The centroid index
38
+ or cluster index is also referred to as a "code" and the table
39
+ mapping codes to centroids and, vice versa, is often referred to as a
40
+ "code book". The result of k-means, a set of centroids, can be
41
+ used to quantize vectors. Quantization aims to find an encoding of
42
+ vectors that reduces the expected distortion.
43
+
44
+ All routines expect obs to be an M by N array, where the rows are
45
+ the observation vectors. The codebook is a k by N array, where the
46
+ ith row is the centroid of code word i. The observation vectors
47
+ and centroids have the same feature dimension.
48
+
49
+ As an example, suppose we wish to compress a 24-bit color image
50
+ (each pixel is represented by one byte for red, one for blue, and
51
+ one for green) before sending it over the web. By using a smaller
52
+ 8-bit encoding, we can reduce the amount of data by two
53
+ thirds. Ideally, the colors for each of the 256 possible 8-bit
54
+ encoding values should be chosen to minimize distortion of the
55
+ color. Running k-means with k=256 generates a code book of 256
56
+ codes, which fills up all possible 8-bit sequences. Instead of
57
+ sending a 3-byte value for each pixel, the 8-bit centroid index
58
+ (or code word) of the dominating centroid is transmitted. The code
59
+ book is also sent over the wire so each 8-bit code can be
60
+ translated back to a 24-bit pixel value representation. If the
61
+ image of interest was of an ocean, we would expect many 24-bit
62
+ blues to be represented by 8-bit codes. If it was an image of a
63
+ human face, more flesh-tone colors would be represented in the
64
+ code book.
65
+
66
+ """
67
+ import warnings
68
+ import numpy as np
69
+ from collections import deque
70
+ from scipy._lib._array_api import (_asarray, array_namespace, is_lazy_array,
71
+ xp_capabilities, xp_copy, xp_size)
72
+ from scipy._lib._util import (check_random_state, rng_integers,
73
+ _transition_to_rng)
74
+ from scipy._lib import array_api_extra as xpx
75
+ from scipy.spatial.distance import cdist
76
+
77
+ from . import _vq
78
+
79
+ __docformat__ = 'restructuredtext'
80
+
81
+ __all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
82
+
83
+
84
+ class ClusterError(Exception):
85
+ pass
86
+
87
+
88
+ @xp_capabilities()
89
+ def whiten(obs, check_finite=None):
90
+ """
91
+ Normalize a group of observations on a per feature basis.
92
+
93
+ Before running k-means, it is beneficial to rescale each feature
94
+ dimension of the observation set by its standard deviation (i.e. "whiten"
95
+ it - as in "white noise" where each frequency has equal power).
96
+ Each feature is divided by its standard deviation across all observations
97
+ to give it unit variance.
98
+
99
+ Parameters
100
+ ----------
101
+ obs : ndarray
102
+ Each row of the array is an observation. The
103
+ columns are the features seen during each observation::
104
+
105
+ # f0 f1 f2
106
+ obs = [[ 1., 1., 1.], #o0
107
+ [ 2., 2., 2.], #o1
108
+ [ 3., 3., 3.], #o2
109
+ [ 4., 4., 4.]] #o3
110
+
111
+ check_finite : bool, optional
112
+ Whether to check that the input matrices contain only finite numbers.
113
+ Disabling may give a performance gain, but may result in problems
114
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
115
+ Default: True for eager backends and False for lazy ones.
116
+
117
+ Returns
118
+ -------
119
+ result : ndarray
120
+ Contains the values in `obs` scaled by the standard deviation
121
+ of each column.
122
+
123
+ Examples
124
+ --------
125
+ >>> import numpy as np
126
+ >>> from scipy.cluster.vq import whiten
127
+ >>> features = np.array([[1.9, 2.3, 1.7],
128
+ ... [1.5, 2.5, 2.2],
129
+ ... [0.8, 0.6, 1.7,]])
130
+ >>> whiten(features)
131
+ array([[ 4.17944278, 2.69811351, 7.21248917],
132
+ [ 3.29956009, 2.93273208, 9.33380951],
133
+ [ 1.75976538, 0.7038557 , 7.21248917]])
134
+
135
+ """
136
+ xp = array_namespace(obs)
137
+ if check_finite is None:
138
+ check_finite = not is_lazy_array(obs)
139
+ obs = _asarray(obs, check_finite=check_finite, xp=xp)
140
+ std_dev = xp.std(obs, axis=0)
141
+ zero_std_mask = std_dev == 0
142
+ std_dev = xpx.at(std_dev, zero_std_mask).set(1.0)
143
+ if check_finite and xp.any(zero_std_mask):
144
+ warnings.warn("Some columns have standard deviation zero. "
145
+ "The values of these columns will not change.",
146
+ RuntimeWarning, stacklevel=2)
147
+ return obs / std_dev
148
+
149
+
150
+ @xp_capabilities(cpu_only=True, reason="uses spatial.distance.cdist",
151
+ jax_jit=False, allow_dask_compute=True)
152
+ def vq(obs, code_book, check_finite=True):
153
+ """
154
+ Assign codes from a code book to observations.
155
+
156
+ Assigns a code from a code book to each observation. Each
157
+ observation vector in the 'M' by 'N' `obs` array is compared with the
158
+ centroids in the code book and assigned the code of the closest
159
+ centroid.
160
+
161
+ The features in `obs` should have unit variance, which can be
162
+ achieved by passing them through the whiten function. The code
163
+ book can be created with the k-means algorithm or a different
164
+ encoding algorithm.
165
+
166
+ Parameters
167
+ ----------
168
+ obs : ndarray
169
+ Each row of the 'M' x 'N' array is an observation. The columns are
170
+ the "features" seen during each observation. The features must be
171
+ whitened first using the whiten function or something equivalent.
172
+ code_book : ndarray
173
+ The code book is usually generated using the k-means algorithm.
174
+ Each row of the array holds a different code, and the columns are
175
+ the features of the code::
176
+
177
+ # f0 f1 f2 f3
178
+ code_book = [[ 1., 2., 3., 4.], #c0
179
+ [ 1., 2., 3., 4.], #c1
180
+ [ 1., 2., 3., 4.]] #c2
181
+
182
+ check_finite : bool, optional
183
+ Whether to check that the input matrices contain only finite numbers.
184
+ Disabling may give a performance gain, but may result in problems
185
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
186
+ Default: True
187
+
188
+ Returns
189
+ -------
190
+ code : ndarray
191
+ A length M array holding the code book index for each observation.
192
+ dist : ndarray
193
+ The distortion (distance) between the observation and its nearest
194
+ code.
195
+
196
+ Examples
197
+ --------
198
+ >>> import numpy as np
199
+ >>> from scipy.cluster.vq import vq
200
+ >>> code_book = np.array([[1., 1., 1.],
201
+ ... [2., 2., 2.]])
202
+ >>> features = np.array([[1.9, 2.3, 1.7],
203
+ ... [1.5, 2.5, 2.2],
204
+ ... [0.8, 0.6, 1.7]])
205
+ >>> vq(features, code_book)
206
+ (array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239]))
207
+
208
+ """
209
+ xp = array_namespace(obs, code_book)
210
+ obs = _asarray(obs, xp=xp, check_finite=check_finite)
211
+ code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
212
+ ct = xp.result_type(obs, code_book)
213
+
214
+ if xp.isdtype(ct, kind='real floating'):
215
+ c_obs = xp.astype(obs, ct, copy=False)
216
+ c_code_book = xp.astype(code_book, ct, copy=False)
217
+ c_obs = np.asarray(c_obs)
218
+ c_code_book = np.asarray(c_code_book)
219
+ result = _vq.vq(c_obs, c_code_book)
220
+ return xp.asarray(result[0]), xp.asarray(result[1])
221
+ return py_vq(obs, code_book, check_finite=False)
222
+
223
+
224
+ def py_vq(obs, code_book, check_finite=True):
225
+ """ Python version of vq algorithm.
226
+
227
+ The algorithm computes the Euclidean distance between each
228
+ observation and every frame in the code_book.
229
+
230
+ Parameters
231
+ ----------
232
+ obs : ndarray
233
+ Expects a rank 2 array. Each row is one observation.
234
+ code_book : ndarray
235
+ Code book to use. Same format than obs. Should have same number of
236
+ features (e.g., columns) than obs.
237
+ check_finite : bool, optional
238
+ Whether to check that the input matrices contain only finite numbers.
239
+ Disabling may give a performance gain, but may result in problems
240
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
241
+ Default: True
242
+
243
+ Returns
244
+ -------
245
+ code : ndarray
246
+ code[i] gives the label of the ith obversation; its code is
247
+ code_book[code[i]].
248
+ mind_dist : ndarray
249
+ min_dist[i] gives the distance between the ith observation and its
250
+ corresponding code.
251
+
252
+ Notes
253
+ -----
254
+ This function is slower than the C version but works for
255
+ all input types. If the inputs have the wrong types for the
256
+ C versions of the function, this one is called as a last resort.
257
+
258
+ It is about 20 times slower than the C version.
259
+
260
+ """
261
+ xp = array_namespace(obs, code_book)
262
+ obs = _asarray(obs, xp=xp, check_finite=check_finite)
263
+ code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
264
+
265
+ if obs.ndim != code_book.ndim:
266
+ raise ValueError("Observation and code_book should have the same rank")
267
+
268
+ if obs.ndim == 1:
269
+ obs = obs[:, xp.newaxis]
270
+ code_book = code_book[:, xp.newaxis]
271
+
272
+ # Once `cdist` has array API support, this `xp.asarray` call can be removed
273
+ dist = xp.asarray(cdist(obs, code_book))
274
+ code = xp.argmin(dist, axis=1)
275
+ min_dist = xp.min(dist, axis=1)
276
+ return code, min_dist
277
+
278
+
279
def _kmeans(obs, guess, thresh=1e-5, xp=None):
    """ "raw" version of k-means.

    Iteratively assigns observations to the nearest code and recomputes
    each code as the centroid of its members, until the average
    distortion changes by no more than `thresh` between iterations.

    Returns
    -------
    code_book
        The lowest distortion codebook found.
    avg_dist
        The average distance an observation is from a code in the book.
        Lower means the code_book matches the data better.

    See Also
    --------
    kmeans : wrapper around k-means
    """
    xp = xp if xp is not None else np
    book = guess
    change = xp.inf
    # Keep only the last two average distortions; their difference is the
    # convergence measure.
    recent = deque([change], maxlen=2)

    obs_np = np.asarray(obs)
    while change > thresh:
        # Membership and distances between obs and the current book.
        labels, distortion = vq(obs, book, check_finite=False)
        recent.append(xp.mean(distortion, axis=-1))
        # Recompute the book as centroids of the associated observations;
        # clusters that lost all members are dropped.
        labels = np.asarray(labels)
        book, occupied = _vq.update_cluster_means(obs_np, labels,
                                                  book.shape[0])
        book = xp.asarray(book[occupied])
        change = xp.abs(recent[0] - recent[1])

    return book, recent[1]
330
+
331
+
332
@xp_capabilities(cpu_only=True, jax_jit=False, allow_dask_compute=True)
@_transition_to_rng("seed")
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
           *, rng=None):
    """
    Performs k-means on a set of observation vectors forming k clusters.

    Repeatedly runs the raw k-means loop and keeps the codebook with the
    lowest distortion.  Each run stops once the absolute change in the
    average Euclidean distance between observations and their centroids
    drops to `thresh` or below.

    Parameters
    ----------
    obs : ndarray
        Each row of the M by N array is an observation vector; columns
        are features.  The features must be whitened first with the
        `whiten` function.
    k_or_guess : int or ndarray
        The number of centroids to generate (initial centroids are then
        drawn at random from the observations), or a k by N array giving
        the initial centroids explicitly.
    iter : int, optional
        Number of independent k-means runs; the codebook with the lowest
        distortion is returned.  Ignored when initial centroids are
        supplied as an array.  This is *not* the number of iterations of
        the k-means algorithm itself.
    thresh : float, optional
        Terminates a k-means run once the change in distortion since the
        last iteration is less than or equal to `thresh`.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True
    rng : `numpy.random.Generator`, optional
        Pseudorandom number generator state. When `rng` is None, a new
        `numpy.random.Generator` is created using entropy from the
        operating system.

    Returns
    -------
    codebook : ndarray
        A k by N array of centroids representing the lowest distortion
        seen (not necessarily the global minimum).  Fewer than k rows may
        be returned, because centroids assigned to no observations are
        removed during iterations.
    distortion : float
        The mean (non-squared) Euclidean distance between the
        observations and the generated centroids.

    See Also
    --------
    kmeans2 : a different implementation of k-means clustering
       with more methods for generating initial centroids but without
       using a distortion change threshold as a stopping criterion.
    whiten : must be called prior to passing an observation matrix
       to kmeans.
    """
    if isinstance(k_or_guess, int):
        xp = array_namespace(obs)
    else:
        xp = array_namespace(obs, k_or_guess)
    obs = _asarray(obs, xp=xp, check_finite=check_finite)
    guess = _asarray(k_or_guess, xp=xp, check_finite=check_finite)
    if iter < 1:
        raise ValueError(f"iter must be at least 1, got {iter}")

    # An array-valued `k_or_guess` is itself the initial codebook.
    if xp_size(guess) != 1:
        if xp_size(guess) < 1:
            raise ValueError(f"Asked for 0 clusters. Initial book was {guess}")
        return _kmeans(obs, guess, thresh=thresh, xp=xp)

    # Scalar case: it must round-trip through int() unchanged.
    k = int(guess)
    if k != guess:
        raise ValueError("If k_or_guess is a scalar, it must be an integer.")
    if k < 1:
        raise ValueError(f"Asked for {k} clusters.")

    rng = check_random_state(rng)

    best_dist = xp.inf
    for _ in range(iter):
        # Each run starts from k randomly selected observations.
        trial_guess = _kpoints(obs, k, rng, xp)
        trial_book, trial_dist = _kmeans(obs, trial_guess,
                                         thresh=thresh, xp=xp)
        if trial_dist < best_dist:
            best_book = trial_book
            best_dist = trial_dist
    return best_book, best_dist
498
+
499
+
500
def _kpoints(data, k, rng, xp):
    """Pick k distinct rows of `data` at random as initial centroids.

    Parameters
    ----------
    data : ndarray
        Rank 1 or 2 array; for rank 2, one row is one observation.
    k : int
        Number of samples to generate.
    rng : `numpy.random.Generator` or `numpy.random.RandomState`
        Random number generator.

    Returns
    -------
    x : ndarray
        A 'k' by 'N' array containing the initial centroids.
    """
    chosen = rng.choice(data.shape[0], size=int(k), replace=False)
    # convert to array with default integer dtype (avoids numpy#25607)
    chosen = xp.asarray(chosen, dtype=xp.asarray([1]).dtype)
    return xp.take(data, chosen, axis=0)
524
+
525
+
526
def _krandinit(data, k, rng, xp):
    """Return k samples of a Gaussian whose parameters are fit to `data`.

    The samples are drawn from a Gaussian random variable whose mean and
    covariance are estimated from `data`.

    Parameters
    ----------
    data : ndarray
        Rank 1 or 2 array; for rank 2, one row is one observation.
    k : int
        Number of samples to generate.
    rng : `numpy.random.Generator` or `numpy.random.RandomState`
        Random number generator.

    Returns
    -------
    x : ndarray
        A 'k' by 'N' array containing the initial centroids.
    """
    mu = xp.mean(data, axis=0)
    k = np.asarray(k)

    if data.ndim == 1:
        # 1-D data: a scalar variance is enough.
        var = xpx.cov(data, xp=xp)
        samples = xp.asarray(rng.standard_normal(size=k))
        samples *= xp.sqrt(var)
    elif data.shape[1] > data.shape[0]:
        # More features than observations: the covariance matrix is rank
        # deficient, so sample inside the row space found by a thin SVD.
        _, s, vh = xp.linalg.svd(data - mu, full_matrices=False)
        samples = xp.asarray(rng.standard_normal(size=(k, xp_size(s))))
        scaled_basis = s[:, None] * vh / xp.sqrt(data.shape[0]
                                                 - xp.asarray(1.))
        samples = samples @ scaled_basis
    else:
        cov = xpx.atleast_nd(xpx.cov(data.T, xp=xp), ndim=2, xp=xp)

        # k rows, d cols (one row = one obs): push iid normals through the
        # Cholesky factor of the covariance.
        samples = xp.asarray(rng.standard_normal(size=(k, xp_size(mu))))
        samples = samples @ xp.linalg.cholesky(cov).T

    samples += mu
    return samples
575
+
576
+
577
def _kpp(data, k, rng, xp):
    """ Pick k points in the data using the kmeans++ (careful seeding) rule.

    Parameters
    ----------
    data : ndarray
        Rank 1 or 2 array; for rank 2, one row is one observation.
    k : int
        Number of samples to generate.
    rng : `numpy.random.Generator` or `numpy.random.RandomState`
        Random number generator.

    Returns
    -------
    init : ndarray
        A 'k' by 'N' array containing the initial centroids.

    References
    ----------
    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM
       Symposium on Discrete Algorithms, 2007.
    """
    ndim = len(data.shape)
    if ndim == 1:
        data = data[:, None]

    n_features = data.shape[1]

    init = xp.empty((int(k), n_features))

    for j in range(k):
        if j == 0:
            # First centroid: a uniformly random observation.
            pick = rng_integers(rng, data.shape[0])
        else:
            # Later centroids: sampled with probability proportional to the
            # squared distance from the nearest centroid chosen so far.
            sq_d = cdist(init[:j, :], data, metric='sqeuclidean').min(axis=0)
            probs = sq_d / sq_d.sum()
            cum = probs.cumsum()
            draw = rng.uniform()
            cum = np.asarray(cum)
            pick = int(np.searchsorted(cum, draw))

        init = xpx.at(init)[j, :].set(data[pick, :])

    if ndim == 1:
        init = init[:, 0]
    return init
627
+
628
+
629
# Map of `minit` option strings to their centroid-initialization routines.
_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
630
+
631
+
632
def _missing_warn():
    """Warn the caller that a cluster ended up with no members."""
    msg = ("One of the clusters is empty. "
           "Re-run kmeans with a different initialization.")
    warnings.warn(msg, stacklevel=3)
637
+
638
+
639
def _missing_raise():
    """Raise a ClusterError because a cluster ended up with no members."""
    msg = ("One of the clusters is empty. "
           "Re-run kmeans with a different initialization.")
    raise ClusterError(msg)
643
+
644
+
645
# Map of `missing` option strings to the empty-cluster handlers above.
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
646
+
647
+
648
@xp_capabilities(cpu_only=True, jax_jit=False, allow_dask_compute=True)
@_transition_to_rng("seed")
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
            missing='warn', check_finite=True, *, rng=None):
    """
    Classify a set of observations into k clusters using the k-means algorithm.

    The algorithm attempts to minimize the Euclidean distance between
    observations and centroids.  Several initialization methods are
    included.

    Parameters
    ----------
    data : ndarray
        A 'M' by 'N' array of 'M' observations in 'N' dimensions, or a
        length 'M' array of 'M' 1-D observations.
    k : int or ndarray
        The number of clusters to form (and centroids to generate).  When
        `minit` is 'matrix', or when an ndarray is given, it is
        interpreted as the initial centroids instead.
    iter : int, optional
        Number of iterations of the k-means algorithm to run.  Note that
        this differs in meaning from the ``iter`` parameter of `kmeans`.
    thresh : float, optional
        (not used yet)
    minit : str, optional
        Initialization method: 'random' (Gaussian fit to the data),
        'points' (random observations), '++' (kmeans++ careful seeding),
        or 'matrix' (`k` holds the initial centroids; a k by M array, or
        a length-k array for 1-D data).
    missing : str, optional
        Empty-cluster policy: 'warn' gives a warning and continues;
        'raise' raises a ClusterError and terminates the algorithm.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True
    rng : `numpy.random.Generator`, optional
        Pseudorandom number generator state. When `rng` is None, a new
        `numpy.random.Generator` is created using entropy from the
        operating system.

    Returns
    -------
    centroid : ndarray
        A 'k' by 'N' array of centroids found at the last iteration of
        k-means.
    label : ndarray
        label[i] is the code or index of the centroid the ith
        observation is closest to.

    See Also
    --------
    kmeans

    References
    ----------
    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM
       Symposium on Discrete Algorithms, 2007.
    """
    if int(iter) < 1:
        raise ValueError(f"Invalid iter ({iter}), must be a positive integer.")
    try:
        on_empty = _valid_miss_meth[missing]
    except KeyError as exc:
        raise ValueError(f"Unknown missing method {missing!r}") from exc

    if isinstance(k, int):
        xp = array_namespace(data)
    else:
        xp = array_namespace(data, k)
    data = _asarray(data, xp=xp, check_finite=check_finite)
    code_book = xp_copy(k, xp=xp)

    if data.ndim == 1:
        n_feat = 1
    elif data.ndim == 2:
        n_feat = data.shape[1]
    else:
        raise ValueError("Input of rank > 2 is not supported.")

    if xp_size(data) < 1 or xp_size(code_book) < 1:
        raise ValueError("Empty input is not supported.")

    # An array-valued `k` (or minit='matrix') supplies the initial centroids
    # directly; otherwise `k` is a cluster count and an initializer runs.
    if minit == 'matrix' or xp_size(code_book) > 1:
        if data.ndim != code_book.ndim:
            raise ValueError("k array doesn't match data rank")
        n_clusters = code_book.shape[0]
        if data.ndim > 1 and code_book.shape[1] != n_feat:
            raise ValueError("k array doesn't match data dimension")
    else:
        n_clusters = int(code_book)

        if n_clusters < 1:
            raise ValueError(
                f"Cannot ask kmeans2 for {n_clusters} clusters (k was {code_book})"
            )
        elif n_clusters != code_book:
            warnings.warn("k was not an integer, was converted.", stacklevel=2)

        try:
            initializer = _valid_init_meth[minit]
        except KeyError as exc:
            raise ValueError(f"Unknown init method {minit!r}") from exc
        else:
            rng = check_random_state(rng)
            code_book = initializer(data, code_book, rng, xp)

    # The compiled update step operates on NumPy arrays only.
    data = np.asarray(data)
    code_book = np.asarray(code_book)
    for _ in range(iter):
        # Nearest centroid for each observation under the current book.
        label = vq(data, code_book, check_finite=check_finite)[0]
        # Recompute centroids as the means of their assigned observations.
        new_code_book, has_members = _vq.update_cluster_means(data, label,
                                                              n_clusters)
        if not has_members.all():
            on_empty()
            # Keep empty clusters at their previous positions.
            new_code_book[~has_members] = code_book[~has_members]
        code_book = new_code_book

    return xp.asarray(code_book), xp.asarray(label)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/__init__.py ADDED
@@ -0,0 +1,358 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""
2
+ ==================================
3
+ Constants (:mod:`scipy.constants`)
4
+ ==================================
5
+
6
+ .. currentmodule:: scipy.constants
7
+
8
+ Physical and mathematical constants and units.
9
+
10
+
11
+ Mathematical constants
12
+ ======================
13
+
14
+ ================ =================================================================
15
+ ``pi`` Pi
16
+ ``golden`` Golden ratio
17
+ ``golden_ratio`` Golden ratio
18
+ ================ =================================================================
19
+
20
+
21
+ Physical constants
22
+ ==================
23
+ The following physical constants are available as attributes of `scipy.constants`.
24
+ All units are `SI <https://en.wikipedia.org/wiki/International_System_of_Units>`_.
25
+
26
+ =========================== ================================================================ ===============
27
+ Attribute Quantity Units
28
+ =========================== ================================================================ ===============
29
+ ``c`` speed of light in vacuum m s^-1
30
+ ``speed_of_light`` speed of light in vacuum m s^-1
31
+ ``mu_0`` the magnetic constant :math:`\mu_0` N A^-2
32
+ ``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0` F m^-1
33
+ ``h`` the Planck constant :math:`h` J Hz^-1
34
+ ``Planck`` the Planck constant :math:`h` J Hz^-1
35
+ ``hbar`` the reduced Planck constant, :math:`\hbar = h/(2\pi)` J s
36
+ ``G`` Newtonian constant of gravitation m^3 kg^-1 s^-2
37
+ ``gravitational_constant`` Newtonian constant of gravitation m^3 kg^-1 s^-2
38
+ ``g`` standard acceleration of gravity m s^-2
39
+ ``e`` elementary charge C
40
+ ``elementary_charge`` elementary charge C
41
+ ``R`` molar gas constant J mol^-1 K^-1
42
+ ``gas_constant`` molar gas constant J mol^-1 K^-1
43
+ ``alpha`` fine-structure constant (unitless)
44
+ ``fine_structure`` fine-structure constant (unitless)
45
+ ``N_A`` Avogadro constant mol^-1
46
+ ``Avogadro`` Avogadro constant mol^-1
47
+ ``k`` Boltzmann constant J K^-1
48
+ ``Boltzmann`` Boltzmann constant J K^-1
49
+ ``sigma`` Stefan-Boltzmann constant :math:`\sigma` W m^-2 K^-4
50
+ ``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma` W m^-2 K^-4
51
+ ``Wien`` Wien wavelength displacement law constant m K
52
+ ``Rydberg`` Rydberg constant m^-1
53
+ ``m_e`` electron mass kg
54
+ ``electron_mass`` electron mass kg
55
+ ``m_p`` proton mass kg
56
+ ``proton_mass`` proton mass kg
57
+ ``m_n`` neutron mass kg
58
+ ``neutron_mass`` neutron mass kg
59
+ =========================== ================================================================ ===============
60
+
61
+
62
+ Constants database
63
+ ------------------
64
+
65
+ In addition to the above variables, :mod:`scipy.constants` also contains the
66
+ 2022 CODATA recommended values [CODATA2022]_ database containing more physical
67
+ constants.
68
+
69
+ .. autosummary::
70
+ :toctree: generated/
71
+
72
+ value -- Value in physical_constants indexed by key
73
+ unit -- Unit in physical_constants indexed by key
74
+ precision -- Relative precision in physical_constants indexed by key
75
+ find -- Return list of physical_constant keys with a given string
76
+ ConstantWarning -- Constant sought not in newest CODATA data set
77
+
78
+ .. data:: physical_constants
79
+
80
+ Dictionary of physical constants, of the format
81
+ ``physical_constants[name] = (value, unit, uncertainty)``.
82
+ The CODATA database uses ellipses to indicate that a value is defined
83
+ (exactly) in terms of others but cannot be represented exactly with the
84
+ allocated number of digits. In these cases, SciPy calculates the derived
85
+ value and reports it to the full precision of a Python ``float``. Although
86
+ ``physical_constants`` lists the uncertainty as ``0.0`` to indicate that
87
+ the CODATA value is exact, the value in ``physical_constants`` is still
88
+ subject to the truncation error inherent in double-precision representation.
89
+
90
+ Available constants:
91
+
92
+ ====================================================================== ====
93
+ %(constant_names)s
94
+ ====================================================================== ====
95
+
96
+
97
+ Units
98
+ =====
99
+
100
+ SI prefixes
101
+ -----------
102
+
103
+ ============ =================================================================
104
+ ``quetta`` :math:`10^{30}`
105
+ ``ronna`` :math:`10^{27}`
106
+ ``yotta`` :math:`10^{24}`
107
+ ``zetta`` :math:`10^{21}`
108
+ ``exa`` :math:`10^{18}`
109
+ ``peta`` :math:`10^{15}`
110
+ ``tera`` :math:`10^{12}`
111
+ ``giga`` :math:`10^{9}`
112
+ ``mega`` :math:`10^{6}`
113
+ ``kilo`` :math:`10^{3}`
114
+ ``hecto`` :math:`10^{2}`
115
+ ``deka`` :math:`10^{1}`
116
+ ``deci`` :math:`10^{-1}`
117
+ ``centi`` :math:`10^{-2}`
118
+ ``milli`` :math:`10^{-3}`
119
+ ``micro`` :math:`10^{-6}`
120
+ ``nano`` :math:`10^{-9}`
121
+ ``pico`` :math:`10^{-12}`
122
+ ``femto`` :math:`10^{-15}`
123
+ ``atto`` :math:`10^{-18}`
124
+ ``zepto`` :math:`10^{-21}`
125
+ ``yocto`` :math:`10^{-24}`
126
+ ``ronto`` :math:`10^{-27}`
127
+ ``quecto`` :math:`10^{-30}`
128
+ ============ =================================================================
129
+
130
+ Binary prefixes
131
+ ---------------
132
+
133
+ ============ =================================================================
134
+ ``kibi`` :math:`2^{10}`
135
+ ``mebi`` :math:`2^{20}`
136
+ ``gibi`` :math:`2^{30}`
137
+ ``tebi`` :math:`2^{40}`
138
+ ``pebi`` :math:`2^{50}`
139
+ ``exbi`` :math:`2^{60}`
140
+ ``zebi`` :math:`2^{70}`
141
+ ``yobi`` :math:`2^{80}`
142
+ ============ =================================================================
143
+
144
+ Mass
145
+ ----
146
+
147
+ ================= ============================================================
148
+ ``gram`` :math:`10^{-3}` kg
149
+ ``metric_ton`` :math:`10^{3}` kg
150
+ ``grain`` one grain in kg
151
+ ``lb`` one pound (avoirdupous) in kg
152
+ ``pound`` one pound (avoirdupous) in kg
153
+ ``blob`` one inch version of a slug in kg (added in 1.0.0)
154
+ ``slinch`` one inch version of a slug in kg (added in 1.0.0)
155
+ ``slug`` one slug in kg (added in 1.0.0)
156
+ ``oz`` one ounce in kg
157
+ ``ounce`` one ounce in kg
158
+ ``stone`` one stone in kg
159
+ ``grain`` one grain in kg
160
+ ``long_ton`` one long ton in kg
161
+ ``short_ton`` one short ton in kg
162
+ ``troy_ounce`` one Troy ounce in kg
163
+ ``troy_pound`` one Troy pound in kg
164
+ ``carat`` one carat in kg
165
+ ``m_u`` atomic mass constant (in kg)
166
+ ``u`` atomic mass constant (in kg)
167
+ ``atomic_mass`` atomic mass constant (in kg)
168
+ ================= ============================================================
169
+
170
+ Angle
171
+ -----
172
+
173
+ ================= ============================================================
174
+ ``degree`` degree in radians
175
+ ``arcmin`` arc minute in radians
176
+ ``arcminute`` arc minute in radians
177
+ ``arcsec`` arc second in radians
178
+ ``arcsecond`` arc second in radians
179
+ ================= ============================================================
180
+
181
+
182
+ Time
183
+ ----
184
+
185
+ ================= ============================================================
186
+ ``minute`` one minute in seconds
187
+ ``hour`` one hour in seconds
188
+ ``day`` one day in seconds
189
+ ``week`` one week in seconds
190
+ ``year`` one year (365 days) in seconds
191
+ ``Julian_year`` one Julian year (365.25 days) in seconds
192
+ ================= ============================================================
193
+
194
+
195
+ Length
196
+ ------
197
+
198
+ ===================== ============================================================
199
+ ``inch`` one inch in meters
200
+ ``foot`` one foot in meters
201
+ ``yard`` one yard in meters
202
+ ``mile`` one mile in meters
203
+ ``mil`` one mil in meters
204
+ ``pt`` one point in meters
205
+ ``point`` one point in meters
206
+ ``survey_foot`` one survey foot in meters
207
+ ``survey_mile`` one survey mile in meters
208
+ ``nautical_mile`` one nautical mile in meters
209
+ ``fermi`` one Fermi in meters
210
+ ``angstrom`` one Angstrom in meters
211
+ ``micron`` one micron in meters
212
+ ``au`` one astronomical unit in meters
213
+ ``astronomical_unit`` one astronomical unit in meters
214
+ ``light_year`` one light year in meters
215
+ ``parsec`` one parsec in meters
216
+ ===================== ============================================================
217
+
218
+ Pressure
219
+ --------
220
+
221
+ ================= ============================================================
222
+ ``atm`` standard atmosphere in pascals
223
+ ``atmosphere`` standard atmosphere in pascals
224
+ ``bar`` one bar in pascals
225
+ ``torr`` one torr (mmHg) in pascals
226
+ ``mmHg`` one torr (mmHg) in pascals
227
+ ``psi`` one psi in pascals
228
+ ================= ============================================================
229
+
230
+ Area
231
+ ----
232
+
233
+ ================= ============================================================
234
+ ``hectare`` one hectare in square meters
235
+ ``acre`` one acre in square meters
236
+ ================= ============================================================
237
+
238
+
239
+ Volume
240
+ ------
241
+
242
+ =================== ========================================================
243
+ ``liter`` one liter in cubic meters
244
+ ``litre`` one liter in cubic meters
245
+ ``gallon`` one gallon (US) in cubic meters
246
+ ``gallon_US`` one gallon (US) in cubic meters
247
+ ``gallon_imp`` one gallon (UK) in cubic meters
248
+ ``fluid_ounce`` one fluid ounce (US) in cubic meters
249
+ ``fluid_ounce_US`` one fluid ounce (US) in cubic meters
250
+ ``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
251
+ ``bbl`` one barrel in cubic meters
252
+ ``barrel`` one barrel in cubic meters
253
+ =================== ========================================================
254
+
255
+ Speed
256
+ -----
257
+
258
+ ================== ==========================================================
259
+ ``kmh`` kilometers per hour in meters per second
260
+ ``mph`` miles per hour in meters per second
261
+ ``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
262
+ ``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second
263
+ ``knot`` one knot in meters per second
264
+ ================== ==========================================================
265
+
266
+
267
+ Temperature
268
+ -----------
269
+
270
+ ===================== =======================================================
271
+ ``zero_Celsius`` zero of Celsius scale in Kelvin
272
+ ``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
273
+ ===================== =======================================================
274
+
275
+ .. autosummary::
276
+ :toctree: generated/
277
+
278
+ convert_temperature
279
+
280
+ Energy
281
+ ------
282
+
283
+ ==================== =======================================================
284
+ ``eV`` one electron volt in Joules
285
+ ``electron_volt`` one electron volt in Joules
286
+ ``calorie`` one calorie (thermochemical) in Joules
287
+ ``calorie_th`` one calorie (thermochemical) in Joules
288
+ ``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
289
+ ``erg`` one erg in Joules
290
+ ``Btu`` one British thermal unit (International Steam Table) in Joules
291
+ ``Btu_IT`` one British thermal unit (International Steam Table) in Joules
292
+ ``Btu_th`` one British thermal unit (thermochemical) in Joules
293
+ ``ton_TNT`` one ton of TNT in Joules
294
+ ==================== =======================================================
295
+
296
+ Power
297
+ -----
298
+
299
+ ==================== =======================================================
300
+ ``hp`` one horsepower in watts
301
+ ``horsepower`` one horsepower in watts
302
+ ==================== =======================================================
303
+
304
+ Force
305
+ -----
306
+
307
+ ==================== =======================================================
308
+ ``dyn`` one dyne in newtons
309
+ ``dyne`` one dyne in newtons
310
+ ``lbf`` one pound force in newtons
311
+ ``pound_force`` one pound force in newtons
312
+ ``kgf`` one kilogram force in newtons
313
+ ``kilogram_force`` one kilogram force in newtons
314
+ ==================== =======================================================
315
+
316
+ Optics
317
+ ------
318
+
319
+ .. autosummary::
320
+ :toctree: generated/
321
+
322
+ lambda2nu
323
+ nu2lambda
324
+
325
+ References
326
+ ==========
327
+
328
+ .. [CODATA2022] CODATA Recommended Values of the Fundamental
329
+ Physical Constants 2022.
330
+
331
+ https://physics.nist.gov/cuu/Constants/
332
+
333
+ """ # noqa: E501
334
# Modules contributed by BasSw (wegwerp@gmail.com)
from ._codata import *
from ._constants import *
from ._codata import _obsolete_constants, physical_constants

# Deprecated namespaces, to be removed in v2.0.0
from . import codata, constants

# Build the table of constant names that gets substituted into the module
# docstring.  Rows are sorted case-insensitively by prefixing the lowercased
# key as the sort key; obsolete constants are excluded.
_rows = sorted(
    (_k.lower(), _k, _v)
    for _k, _v in physical_constants.items()
    if _k not in _obsolete_constants
)
# Each row: ``Name`` padded to column 66, then "<value> <unit>".
_constant_names = "\n".join(
    f"``{_name}``{' ' * (66 - len(_name))} {_info[0]} {_info[1]}"
    for _lower, _name, _info in _rows
)
if __doc__:
    __doc__ = __doc__ % dict(constant_names=_constant_names)

del _constant_names
del _rows

# Public API is everything visible at this point without a leading underscore.
# Note this is computed *before* the PytestTester import below, so neither
# ``PytestTester`` nor ``test`` ends up in ``__all__``.
__all__ = [s for s in dir() if not s.startswith('_')]

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/_codata.py ADDED
The diff for this file is too large to render. See raw diff
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/_constants.py ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Collection of physical constants and conversion factors.
3
+
4
+ Most constants are in SI units, so you can do
5
+ print '10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots'
6
+
7
+ The list is not meant to be comprehensive, but just convenient for everyday use.
8
+ """
9
+
10
+ import math as _math
11
+ from typing import TYPE_CHECKING, Any
12
+
13
+ from ._codata import value as _cd
14
+
15
+ if TYPE_CHECKING:
16
+ import numpy.typing as npt
17
+
18
+ from scipy._lib._array_api import array_namespace, _asarray, xp_capabilities
19
+
20
+
21
+ """
22
+ BasSw 2006
23
+ physical constants: imported from CODATA
24
+ unit conversion: see e.g., NIST special publication 811
25
+ Use at own risk: double-check values before calculating your Mars orbit-insertion burn.
26
+ Some constants exist in a few variants, which are marked with suffixes.
27
+ The ones without any suffix should be the most common ones.
28
+ """
29
+
30
+ __all__ = [
31
+ 'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G',
32
+ 'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg',
33
+ 'Stefan_Boltzmann', 'Wien', 'acre', 'alpha',
34
+ 'angstrom', 'arcmin', 'arcminute', 'arcsec',
35
+ 'arcsecond', 'astronomical_unit', 'atm',
36
+ 'atmosphere', 'atomic_mass', 'atto', 'au', 'bar',
37
+ 'barrel', 'bbl', 'blob', 'c', 'calorie',
38
+ 'calorie_IT', 'calorie_th', 'carat', 'centi',
39
+ 'convert_temperature', 'day', 'deci', 'degree',
40
+ 'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e',
41
+ 'eV', 'electron_mass', 'electron_volt',
42
+ 'elementary_charge', 'epsilon_0', 'erg',
43
+ 'exa', 'exbi', 'femto', 'fermi', 'fine_structure',
44
+ 'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp',
45
+ 'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp',
46
+ 'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio',
47
+ 'grain', 'gram', 'gravitational_constant', 'h', 'hbar',
48
+ 'hectare', 'hecto', 'horsepower', 'hour', 'hp',
49
+ 'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force',
50
+ 'kmh', 'knot', 'lambda2nu', 'lb', 'lbf',
51
+ 'light_year', 'liter', 'litre', 'long_ton', 'm_e',
52
+ 'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega',
53
+ 'metric_ton', 'micro', 'micron', 'mil', 'mile',
54
+ 'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano',
55
+ 'nautical_mile', 'neutron_mass', 'nu2lambda',
56
+ 'ounce', 'oz', 'parsec', 'pebi', 'peta',
57
+ 'pi', 'pico', 'point', 'pound', 'pound_force',
58
+ 'proton_mass', 'psi', 'pt', 'quecto', 'quetta', 'ronna', 'ronto',
59
+ 'short_ton', 'sigma', 'slinch', 'slug', 'speed_of_light',
60
+ 'speed_of_sound', 'stone', 'survey_foot',
61
+ 'survey_mile', 'tebi', 'tera', 'ton_TNT',
62
+ 'torr', 'troy_ounce', 'troy_pound', 'u',
63
+ 'week', 'yard', 'year', 'yobi', 'yocto',
64
+ 'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta'
65
+ ]
66
+
67
+
68
+ # mathematical constants
69
+ pi = _math.pi
70
+ golden = golden_ratio = (1 + _math.sqrt(5)) / 2
71
+
72
+ # SI prefixes
73
+ quetta = 1e30
74
+ ronna = 1e27
75
+ yotta = 1e24
76
+ zetta = 1e21
77
+ exa = 1e18
78
+ peta = 1e15
79
+ tera = 1e12
80
+ giga = 1e9
81
+ mega = 1e6
82
+ kilo = 1e3
83
+ hecto = 1e2
84
+ deka = 1e1
85
+ deci = 1e-1
86
+ centi = 1e-2
87
+ milli = 1e-3
88
+ micro = 1e-6
89
+ nano = 1e-9
90
+ pico = 1e-12
91
+ femto = 1e-15
92
+ atto = 1e-18
93
+ zepto = 1e-21
94
+ yocto = 1e-24
95
+ ronto = 1e-27
96
+ quecto = 1e-30
97
+
98
+ # binary prefixes
99
+ kibi = 2**10
100
+ mebi = 2**20
101
+ gibi = 2**30
102
+ tebi = 2**40
103
+ pebi = 2**50
104
+ exbi = 2**60
105
+ zebi = 2**70
106
+ yobi = 2**80
107
+
108
+ # physical constants
109
+ c = speed_of_light = _cd('speed of light in vacuum')
110
+ mu_0 = _cd('vacuum mag. permeability')
111
+ epsilon_0 = _cd('vacuum electric permittivity')
112
+ h = Planck = _cd('Planck constant')
113
+ hbar = _cd('reduced Planck constant')
114
+ G = gravitational_constant = _cd('Newtonian constant of gravitation')
115
+ g = _cd('standard acceleration of gravity')
116
+ e = elementary_charge = _cd('elementary charge')
117
+ R = gas_constant = _cd('molar gas constant')
118
+ alpha = fine_structure = _cd('fine-structure constant')
119
+ N_A = Avogadro = _cd('Avogadro constant')
120
+ k = Boltzmann = _cd('Boltzmann constant')
121
+ sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant')
122
+ Wien = _cd('Wien wavelength displacement law constant')
123
+ Rydberg = _cd('Rydberg constant')
124
+
125
+ # mass in kg
126
+ gram = 1e-3
127
+ metric_ton = 1e3
128
+ grain = 64.79891e-6
129
+ lb = pound = 7000 * grain # avoirdupois
130
+ blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0)
131
+ slug = blob / 12 # lbf*s**2/foot (added in 1.0.0)
132
+ oz = ounce = pound / 16
133
+ stone = 14 * pound
134
+ long_ton = 2240 * pound
135
+ short_ton = 2000 * pound
136
+
137
+ troy_ounce = 480 * grain # only for metals / gems
138
+ troy_pound = 12 * troy_ounce
139
+ carat = 200e-6
140
+
141
+ m_e = electron_mass = _cd('electron mass')
142
+ m_p = proton_mass = _cd('proton mass')
143
+ m_n = neutron_mass = _cd('neutron mass')
144
+ m_u = u = atomic_mass = _cd('atomic mass constant')
145
+
146
+ # angle in rad
147
+ degree = pi / 180
148
+ arcmin = arcminute = degree / 60
149
+ arcsec = arcsecond = arcmin / 60
150
+
151
+ # time in second
152
+ minute = 60.0
153
+ hour = 60 * minute
154
+ day = 24 * hour
155
+ week = 7 * day
156
+ year = 365 * day
157
+ Julian_year = 365.25 * day
158
+
159
+ # length in meter
160
+ inch = 0.0254
161
+ foot = 12 * inch
162
+ yard = 3 * foot
163
+ mile = 1760 * yard
164
+ mil = inch / 1000
165
+ pt = point = inch / 72 # typography
166
+ survey_foot = 1200.0 / 3937
167
+ survey_mile = 5280 * survey_foot
168
+ nautical_mile = 1852.0
169
+ fermi = 1e-15
170
+ angstrom = 1e-10
171
+ micron = 1e-6
172
+ au = astronomical_unit = 149597870700.0
173
+ light_year = Julian_year * c
174
+ parsec = au / arcsec
175
+
176
+ # pressure in pascal
177
+ atm = atmosphere = _cd('standard atmosphere')
178
+ bar = 1e5
179
+ torr = mmHg = atm / 760
180
+ psi = pound * g / (inch * inch)
181
+
182
+ # area in meter**2
183
+ hectare = 1e4
184
+ acre = 43560 * foot**2
185
+
186
+ # volume in meter**3
187
+ litre = liter = 1e-3
188
+ gallon = gallon_US = 231 * inch**3 # US
189
+ # pint = gallon_US / 8
190
+ fluid_ounce = fluid_ounce_US = gallon_US / 128
191
+ bbl = barrel = 42 * gallon_US # for oil
192
+
193
+ gallon_imp = 4.54609e-3 # UK
194
+ fluid_ounce_imp = gallon_imp / 160
195
+
196
+ # speed in meter per second
197
+ kmh = 1e3 / hour
198
+ mph = mile / hour
199
+ # approx value of mach at 15 degrees in 1 atm. Is this a common value?
200
+ mach = speed_of_sound = 340.5
201
+ knot = nautical_mile / hour
202
+
203
+ # temperature in kelvin
204
+ zero_Celsius = 273.15
205
+ degree_Fahrenheit = 1/1.8 # only for differences
206
+
207
+ # energy in joule
208
+ eV = electron_volt = elementary_charge # * 1 Volt
209
+ calorie = calorie_th = 4.184
210
+ calorie_IT = 4.1868
211
+ erg = 1e-7
212
+ Btu_th = pound * degree_Fahrenheit * calorie_th / gram
213
+ Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
214
+ ton_TNT = 1e9 * calorie_th
215
+ # Wh = watt_hour
216
+
217
+ # power in watt
218
+ hp = horsepower = 550 * foot * pound * g
219
+
220
+ # force in newton
221
+ dyn = dyne = 1e-5
222
+ lbf = pound_force = pound * g
223
+ kgf = kilogram_force = g # * 1 kg
224
+
225
+ # functions for conversions that are not linear
226
+
227
+
228
+ @xp_capabilities()
229
+ def convert_temperature(
230
+ val: "npt.ArrayLike",
231
+ old_scale: str,
232
+ new_scale: str,
233
+ ) -> Any:
234
+ """
235
+ Convert from a temperature scale to another one among Celsius, Kelvin,
236
+ Fahrenheit, and Rankine scales.
237
+
238
+ Parameters
239
+ ----------
240
+ val : array_like
241
+ Value(s) of the temperature(s) to be converted expressed in the
242
+ original scale.
243
+ old_scale : str
244
+ Specifies as a string the original scale from which the temperature
245
+ value(s) will be converted. Supported scales are Celsius ('Celsius',
246
+ 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
247
+ Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine
248
+ ('Rankine', 'rankine', 'R', 'r').
249
+ new_scale : str
250
+ Specifies as a string the new scale to which the temperature
251
+ value(s) will be converted. Supported scales are Celsius ('Celsius',
252
+ 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
253
+ Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine
254
+ ('Rankine', 'rankine', 'R', 'r').
255
+
256
+ Returns
257
+ -------
258
+ res : float or array of floats
259
+ Value(s) of the converted temperature(s) expressed in the new scale.
260
+
261
+ Notes
262
+ -----
263
+ .. versionadded:: 0.18.0
264
+
265
+ Examples
266
+ --------
267
+ >>> from scipy.constants import convert_temperature
268
+ >>> import numpy as np
269
+ >>> convert_temperature(np.array([-40, 40]), 'Celsius', 'Kelvin')
270
+ array([ 233.15, 313.15])
271
+
272
+ """
273
+ xp = array_namespace(val)
274
+ _val = _asarray(val, xp=xp, subok=True)
275
+ # Convert from `old_scale` to Kelvin
276
+ if old_scale.lower() in ['celsius', 'c']:
277
+ tempo = _val + zero_Celsius
278
+ elif old_scale.lower() in ['kelvin', 'k']:
279
+ tempo = _val
280
+ elif old_scale.lower() in ['fahrenheit', 'f']:
281
+ tempo = (_val - 32) * 5 / 9 + zero_Celsius
282
+ elif old_scale.lower() in ['rankine', 'r']:
283
+ tempo = _val * 5 / 9
284
+ else:
285
+ raise NotImplementedError(f"{old_scale=} is unsupported: supported scales "
286
+ "are Celsius, Kelvin, Fahrenheit, and "
287
+ "Rankine")
288
+ # and from Kelvin to `new_scale`.
289
+ if new_scale.lower() in ['celsius', 'c']:
290
+ res = tempo - zero_Celsius
291
+ elif new_scale.lower() in ['kelvin', 'k']:
292
+ res = tempo
293
+ elif new_scale.lower() in ['fahrenheit', 'f']:
294
+ res = (tempo - zero_Celsius) * 9 / 5 + 32
295
+ elif new_scale.lower() in ['rankine', 'r']:
296
+ res = tempo * 9 / 5
297
+ else:
298
+ raise NotImplementedError(f"{new_scale=} is unsupported: supported "
299
+ "scales are 'Celsius', 'Kelvin', "
300
+ "'Fahrenheit', and 'Rankine'")
301
+
302
+ return res
303
+
304
+
305
+ # optics
306
+
307
+
308
+ @xp_capabilities()
309
+ def lambda2nu(lambda_: "npt.ArrayLike") -> Any:
310
+ """
311
+ Convert wavelength to optical frequency
312
+
313
+ Parameters
314
+ ----------
315
+ lambda_ : array_like
316
+ Wavelength(s) to be converted.
317
+
318
+ Returns
319
+ -------
320
+ nu : float or array of floats
321
+ Equivalent optical frequency.
322
+
323
+ Notes
324
+ -----
325
+ Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the
326
+ (vacuum) speed of light in meters/second.
327
+
328
+ Examples
329
+ --------
330
+ >>> from scipy.constants import lambda2nu, speed_of_light
331
+ >>> import numpy as np
332
+ >>> lambda2nu(np.array((1, speed_of_light)))
333
+ array([ 2.99792458e+08, 1.00000000e+00])
334
+
335
+ """
336
+ xp = array_namespace(lambda_)
337
+ return c / _asarray(lambda_, xp=xp, subok=True)
338
+
339
+
340
+ @xp_capabilities()
341
+ def nu2lambda(nu: "npt.ArrayLike") -> Any:
342
+ """
343
+ Convert optical frequency to wavelength.
344
+
345
+ Parameters
346
+ ----------
347
+ nu : array_like
348
+ Optical frequency to be converted.
349
+
350
+ Returns
351
+ -------
352
+ lambda : float or array of floats
353
+ Equivalent wavelength(s).
354
+
355
+ Notes
356
+ -----
357
+ Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the
358
+ (vacuum) speed of light in meters/second.
359
+
360
+ Examples
361
+ --------
362
+ >>> from scipy.constants import nu2lambda, speed_of_light
363
+ >>> import numpy as np
364
+ >>> nu2lambda(np.array((1, speed_of_light)))
365
+ array([ 2.99792458e+08, 1.00000000e+00])
366
+
367
+ """
368
+ xp = array_namespace(nu)
369
+ return c / _asarray(nu, xp=xp, subok=True)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/codata.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.constants` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Names still reachable through the deprecated `scipy.constants.codata`
# namespace; access is routed lazily through module-level __getattr__ below.
__all__ = [  # noqa: F822
    'physical_constants', 'value', 'unit', 'precision', 'find',
    'ConstantWarning', 'k', 'c',

]


def __dir__():
    # PEP 562 module __dir__: advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: emit a deprecation warning and forward
    # the lookup to the private `scipy.constants._codata` module.
    return _sub_module_deprecation(sub_package="constants", module="codata",
                                   private_modules=["_codata"], all=__all__,
                                   attribute=name)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/constants/constants.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.constants` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Names still reachable through the deprecated `scipy.constants.constants`
# namespace; access is routed lazily through module-level __getattr__ below.
__all__ = [  # noqa: F822
    'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G',
    'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg',
    'Stefan_Boltzmann', 'Wien', 'acre', 'alpha',
    'angstrom', 'arcmin', 'arcminute', 'arcsec',
    'arcsecond', 'astronomical_unit', 'atm',
    'atmosphere', 'atomic_mass', 'atto', 'au', 'bar',
    'barrel', 'bbl', 'blob', 'c', 'calorie',
    'calorie_IT', 'calorie_th', 'carat', 'centi',
    'convert_temperature', 'day', 'deci', 'degree',
    'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e',
    'eV', 'electron_mass', 'electron_volt',
    'elementary_charge', 'epsilon_0', 'erg',
    'exa', 'exbi', 'femto', 'fermi', 'fine_structure',
    'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp',
    'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp',
    'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio',
    'grain', 'gram', 'gravitational_constant', 'h', 'hbar',
    'hectare', 'hecto', 'horsepower', 'hour', 'hp',
    'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force',
    'kmh', 'knot', 'lambda2nu', 'lb', 'lbf',
    'light_year', 'liter', 'litre', 'long_ton', 'm_e',
    'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega',
    'metric_ton', 'micro', 'micron', 'mil', 'mile',
    'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano',
    'nautical_mile', 'neutron_mass', 'nu2lambda',
    'ounce', 'oz', 'parsec', 'pebi', 'peta',
    'pi', 'pico', 'point', 'pound', 'pound_force',
    'proton_mass', 'psi', 'pt', 'short_ton',
    'sigma', 'slinch', 'slug', 'speed_of_light',
    'speed_of_sound', 'stone', 'survey_foot',
    'survey_mile', 'tebi', 'tera', 'ton_TNT',
    'torr', 'troy_ounce', 'troy_pound', 'u',
    'week', 'yard', 'year', 'yobi', 'yocto',
    'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta'
]


def __dir__():
    # PEP 562 module __dir__: advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: emit a deprecation warning and forward
    # the lookup to the private `scipy.constants._constants` module.
    return _sub_module_deprecation(sub_package="constants", module="constants",
                                   private_modules=["_constants"], all=__all__,
                                   attribute=name)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (3.05 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/_download_all.cpython-312.pyc ADDED
Binary file (2.81 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-312.pyc ADDED
Binary file (7.68 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/_registry.cpython-312.pyc ADDED
Binary file (839 Bytes). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/__pycache__/_utils.cpython-312.pyc ADDED
Binary file (3.32 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/tests/__init__.py ADDED
File without changes
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (212 Bytes). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-312.pyc ADDED
Binary file (6.28 kB). View file
 
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/datasets/tests/test_data.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from scipy.datasets._registry import registry
2
+ from scipy.datasets._fetchers import data_fetcher
3
+ from scipy.datasets._utils import _clear_cache
4
+ from scipy.datasets import ascent, face, electrocardiogram, download_all
5
+ from numpy.testing import assert_equal, assert_almost_equal
6
+ import os
7
+ from threading import get_ident
8
+ import pytest
9
+
10
# `pooch` is an optional dependency used for downloading/caching the datasets;
# fail early with an actionable message if it is missing.
try:
    import pooch
except ImportError:
    raise ImportError("Missing optional dependency 'pooch' required "
                      "for scipy.datasets module. Please use pip or "
                      "conda to install 'pooch'.")


# Local cache directory managed by the pooch-based fetcher.
data_dir = data_fetcher.path  # type: ignore
19
+
20
+
21
+ def _has_hash(path, expected_hash):
22
+ """Check if the provided path has the expected hash."""
23
+ if not os.path.exists(path):
24
+ return False
25
+ return pooch.file_hash(path) == expected_hash
26
+
27
+
28
class TestDatasets:
    # Integration tests for the bundled scipy.datasets: each test checks the
    # loaded array's shape/stats and that the cached file's hash matches the
    # registry entry.

    @pytest.fixture(scope='module', autouse=True)
    def test_download_all(self):
        # This fixture requires INTERNET CONNECTION

        # test_setup phase: fetch every dataset into the local cache before
        # any test in this module runs.
        download_all()

        yield

    @pytest.mark.fail_slow(10)
    def test_existence_all(self):
        # Every registered dataset file should now be present in the cache
        # (the directory may contain extra files, hence >=).
        assert len(os.listdir(data_dir)) >= len(registry)

    def test_ascent(self):
        assert_equal(ascent().shape, (512, 512))

        # hash check
        assert _has_hash(os.path.join(data_dir, "ascent.dat"),
                         registry["ascent.dat"])

    def test_face(self):
        assert_equal(face().shape, (768, 1024, 3))

        # hash check
        assert _has_hash(os.path.join(data_dir, "face.dat"),
                         registry["face.dat"])

    def test_electrocardiogram(self):
        # Test shape, dtype and stats of signal
        ecg = electrocardiogram()
        assert_equal(ecg.dtype, float)
        assert_equal(ecg.shape, (108000,))
        assert_almost_equal(ecg.mean(), -0.16510875)
        assert_almost_equal(ecg.std(), 0.5992473991177294)

        # hash check
        assert _has_hash(os.path.join(data_dir, "ecg.dat"),
                         registry["ecg.dat"])
68
+
69
+
70
def test_clear_cache(tmp_path):
    # Exercises _clear_cache against a throwaway cache directory populated
    # with dummy files, using a dummy method->files map instead of the real
    # registry.
    # Note: `tmp_path` is a pytest fixture, it handles cleanup
    thread_basepath = tmp_path / str(get_ident())
    thread_basepath.mkdir()

    dummy_basepath = thread_basepath / "dummy_cache_dir"
    dummy_basepath.mkdir()

    # Create four dummy dataset files (data0.dat .. data3.dat), one per
    # dummy dataset method
    dummy_method_map = {}
    for i in range(4):
        dummy_method_map[f"data{i}"] = [f"data{i}.dat"]
        data_filepath = dummy_basepath / f"data{i}.dat"
        data_filepath.write_text("")

    # clear files associated to single dataset method data0
    # also test callable argument instead of list of callables
    def data0():
        pass
    _clear_cache(datasets=data0, cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath/"data0.dat")

    # clear files associated to multiple dataset methods "data1" and "data2"
    def data1():
        pass

    def data2():
        pass
    _clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath/"data1.dat")
    assert not os.path.exists(dummy_basepath/"data2.dat")

    # clear multiple dataset files "data4_0.dat" and "data4_1.dat"
    # associated with dataset method "data4"
    def data4():
        pass
    # create files
    (dummy_basepath / "data4_0.dat").write_text("")
    (dummy_basepath / "data4_1.dat").write_text("")

    dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"]
    _clear_cache(datasets=[data4], cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath/"data4_0.dat")
    assert not os.path.exists(dummy_basepath/"data4_1.dat")

    # wrong dataset method should raise ValueError since it
    # doesn't exist in the dummy_method_map
    def data5():
        pass
    with pytest.raises(ValueError):
        _clear_cache(datasets=[data5], cache_dir=dummy_basepath,
                     method_map=dummy_method_map)

    # remove all dataset cache (cache_dir itself is deleted)
    _clear_cache(datasets=None, cache_dir=dummy_basepath)
    assert not os.path.exists(dummy_basepath)
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/differentiate/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (876 Bytes). View file