Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/__init__.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api_compat_vendor.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api_docs_tables.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api_no_0d.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api_override.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_bunch.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_pep440.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_public_api.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_sparse.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_testutils.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_util.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/deprecation.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/doccer.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/uarray.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/LICENSE +29 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/__init__.py +116 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/_backend.py +707 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/__init__.py +22 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/_internal.py +77 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__init__.py +1 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_fft.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_aliases.py +702 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_fft.py +213 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_helpers.py +1079 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_linalg.py +230 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_typing.py +189 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py +24 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_info.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/fft.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-312.pyc +0 -0
- URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py +168 -0
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (584 Bytes). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api.cpython-312.pyc
ADDED
|
Binary file (38.2 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api_compat_vendor.cpython-312.pyc
ADDED
|
Binary file (352 Bytes). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api_docs_tables.cpython-312.pyc
ADDED
|
Binary file (9.78 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api_no_0d.cpython-312.pyc
ADDED
|
Binary file (4.1 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_array_api_override.cpython-312.pyc
ADDED
|
Binary file (5.7 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_bunch.cpython-312.pyc
ADDED
|
Binary file (9.04 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-312.pyc
ADDED
|
Binary file (9.42 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-312.pyc
ADDED
|
Binary file (8.32 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-312.pyc
ADDED
|
Binary file (32.6 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-312.pyc
ADDED
|
Binary file (14.9 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-312.pyc
ADDED
|
Binary file (3.62 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_pep440.cpython-312.pyc
ADDED
|
Binary file (18.8 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_public_api.cpython-312.pyc
ADDED
|
Binary file (1.13 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_sparse.cpython-312.pyc
ADDED
|
Binary file (1.33 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_testutils.cpython-312.pyc
ADDED
|
Binary file (16.4 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-312.pyc
ADDED
|
Binary file (3.23 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/_util.cpython-312.pyc
ADDED
|
Binary file (56.2 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/deprecation.cpython-312.pyc
ADDED
|
Binary file (11.3 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/doccer.cpython-312.pyc
ADDED
|
Binary file (12.6 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/__pycache__/uarray.cpython-312.pyc
ADDED
|
Binary file (940 Bytes). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/LICENSE
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
BSD 3-Clause License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2018, Quansight-Labs
|
| 4 |
+
All rights reserved.
|
| 5 |
+
|
| 6 |
+
Redistribution and use in source and binary forms, with or without
|
| 7 |
+
modification, are permitted provided that the following conditions are met:
|
| 8 |
+
|
| 9 |
+
* Redistributions of source code must retain the above copyright notice, this
|
| 10 |
+
list of conditions and the following disclaimer.
|
| 11 |
+
|
| 12 |
+
* Redistributions in binary form must reproduce the above copyright notice,
|
| 13 |
+
this list of conditions and the following disclaimer in the documentation
|
| 14 |
+
and/or other materials provided with the distribution.
|
| 15 |
+
|
| 16 |
+
* Neither the name of the copyright holder nor the names of its
|
| 17 |
+
contributors may be used to endorse or promote products derived from
|
| 18 |
+
this software without specific prior written permission.
|
| 19 |
+
|
| 20 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 21 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 22 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 23 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 24 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 25 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 26 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 27 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 28 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 29 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/__init__.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
.. note:
|
| 3 |
+
If you are looking for overrides for NumPy-specific methods, see the
|
| 4 |
+
documentation for :obj:`unumpy`. This page explains how to write
|
| 5 |
+
back-ends and multimethods.
|
| 6 |
+
|
| 7 |
+
``uarray`` is built around a back-end protocol, and overridable multimethods.
|
| 8 |
+
It is necessary to define multimethods for back-ends to be able to override them.
|
| 9 |
+
See the documentation of :obj:`generate_multimethod` on how to write multimethods.
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
Let's start with the simplest:
|
| 14 |
+
|
| 15 |
+
``__ua_domain__`` defines the back-end *domain*. The domain consists of period-
|
| 16 |
+
separated string consisting of the modules you extend plus the submodule. For
|
| 17 |
+
example, if a submodule ``module2.submodule`` extends ``module1``
|
| 18 |
+
(i.e., it exposes dispatchables marked as types available in ``module1``),
|
| 19 |
+
then the domain string should be ``"module1.module2.submodule"``.
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
For the purpose of this demonstration, we'll be creating an object and setting
|
| 23 |
+
its attributes directly. However, note that you can use a module or your own type
|
| 24 |
+
as a backend as well.
|
| 25 |
+
|
| 26 |
+
>>> class Backend: pass
|
| 27 |
+
>>> be = Backend()
|
| 28 |
+
>>> be.__ua_domain__ = "ua_examples"
|
| 29 |
+
|
| 30 |
+
It might be useful at this point to sidetrack to the documentation of
|
| 31 |
+
:obj:`generate_multimethod` to find out how to generate a multimethod
|
| 32 |
+
overridable by :obj:`uarray`. Needless to say, writing a backend and
|
| 33 |
+
creating multimethods are mostly orthogonal activities, and knowing
|
| 34 |
+
one doesn't necessarily require knowledge of the other, although it
|
| 35 |
+
is certainly helpful. We expect core API designers/specifiers to write the
|
| 36 |
+
multimethods, and implementors to override them. But, as is often the case,
|
| 37 |
+
similar people write both.
|
| 38 |
+
|
| 39 |
+
Without further ado, here's an example multimethod:
|
| 40 |
+
|
| 41 |
+
>>> import uarray as ua
|
| 42 |
+
>>> from uarray import Dispatchable
|
| 43 |
+
>>> def override_me(a, b):
|
| 44 |
+
... return Dispatchable(a, int),
|
| 45 |
+
>>> def override_replacer(args, kwargs, dispatchables):
|
| 46 |
+
... return (dispatchables[0], args[1]), {}
|
| 47 |
+
>>> overridden_me = ua.generate_multimethod(
|
| 48 |
+
... override_me, override_replacer, "ua_examples"
|
| 49 |
+
... )
|
| 50 |
+
|
| 51 |
+
Next comes the part about overriding the multimethod. This requires
|
| 52 |
+
the ``__ua_function__`` protocol, and the ``__ua_convert__``
|
| 53 |
+
protocol. The ``__ua_function__`` protocol has the signature
|
| 54 |
+
``(method, args, kwargs)`` where ``method`` is the passed
|
| 55 |
+
multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables``
|
| 56 |
+
is the list of converted dispatchables passed in.
|
| 57 |
+
|
| 58 |
+
>>> def __ua_function__(method, args, kwargs):
|
| 59 |
+
... return method.__name__, args, kwargs
|
| 60 |
+
>>> be.__ua_function__ = __ua_function__
|
| 61 |
+
|
| 62 |
+
The other protocol of interest is the ``__ua_convert__`` protocol. It has the
|
| 63 |
+
signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion
|
| 64 |
+
between the formats should ideally be an ``O(1)`` operation, but it means that
|
| 65 |
+
no memory copying should be involved, only views of the existing data.
|
| 66 |
+
|
| 67 |
+
>>> def __ua_convert__(dispatchables, coerce):
|
| 68 |
+
... for d in dispatchables:
|
| 69 |
+
... if d.type is int:
|
| 70 |
+
... if coerce and d.coercible:
|
| 71 |
+
... yield str(d.value)
|
| 72 |
+
... else:
|
| 73 |
+
... yield d.value
|
| 74 |
+
>>> be.__ua_convert__ = __ua_convert__
|
| 75 |
+
|
| 76 |
+
Now that we have defined the backend, the next thing to do is to call the multimethod.
|
| 77 |
+
|
| 78 |
+
>>> with ua.set_backend(be):
|
| 79 |
+
... overridden_me(1, "2")
|
| 80 |
+
('override_me', (1, '2'), {})
|
| 81 |
+
|
| 82 |
+
Note that the marked type has no effect on the actual type of the passed object.
|
| 83 |
+
We can also coerce the type of the input.
|
| 84 |
+
|
| 85 |
+
>>> with ua.set_backend(be, coerce=True):
|
| 86 |
+
... overridden_me(1, "2")
|
| 87 |
+
... overridden_me(1.0, "2")
|
| 88 |
+
('override_me', ('1', '2'), {})
|
| 89 |
+
('override_me', ('1.0', '2'), {})
|
| 90 |
+
|
| 91 |
+
Another feature is that if you remove ``__ua_convert__``, the arguments are not
|
| 92 |
+
converted at all and it's up to the backend to handle that.
|
| 93 |
+
|
| 94 |
+
>>> del be.__ua_convert__
|
| 95 |
+
>>> with ua.set_backend(be):
|
| 96 |
+
... overridden_me(1, "2")
|
| 97 |
+
('override_me', (1, '2'), {})
|
| 98 |
+
|
| 99 |
+
You also have the option to return ``NotImplemented``, in which case processing moves on
|
| 100 |
+
to the next back-end, which in this case, doesn't exist. The same applies to
|
| 101 |
+
``__ua_convert__``.
|
| 102 |
+
|
| 103 |
+
>>> be.__ua_function__ = lambda *a, **kw: NotImplemented
|
| 104 |
+
>>> with ua.set_backend(be):
|
| 105 |
+
... overridden_me(1, "2")
|
| 106 |
+
Traceback (most recent call last):
|
| 107 |
+
...
|
| 108 |
+
uarray.BackendNotImplementedError: ...
|
| 109 |
+
|
| 110 |
+
The last possibility is if we don't have ``__ua_convert__``, in which case the job is
|
| 111 |
+
left up to ``__ua_function__``, but putting things back into arrays after conversion
|
| 112 |
+
will not be possible.
|
| 113 |
+
"""
|
| 114 |
+
|
| 115 |
+
from ._backend import *
|
| 116 |
+
__version__ = '0.8.8.dev0+aa94c5a4.scipy'
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (4.74 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-312.pyc
ADDED
|
Binary file (25.1 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/_uarray/_backend.py
ADDED
|
@@ -0,0 +1,707 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import typing
|
| 2 |
+
import types
|
| 3 |
+
import inspect
|
| 4 |
+
import functools
|
| 5 |
+
from . import _uarray
|
| 6 |
+
import copyreg
|
| 7 |
+
import pickle
|
| 8 |
+
import contextlib
|
| 9 |
+
import threading
|
| 10 |
+
|
| 11 |
+
from ._uarray import ( # type: ignore
|
| 12 |
+
BackendNotImplementedError,
|
| 13 |
+
_Function,
|
| 14 |
+
_SkipBackendContext,
|
| 15 |
+
_SetBackendContext,
|
| 16 |
+
_BackendState,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
__all__ = [
|
| 20 |
+
"set_backend",
|
| 21 |
+
"set_global_backend",
|
| 22 |
+
"skip_backend",
|
| 23 |
+
"register_backend",
|
| 24 |
+
"determine_backend",
|
| 25 |
+
"determine_backend_multi",
|
| 26 |
+
"clear_backends",
|
| 27 |
+
"create_multimethod",
|
| 28 |
+
"generate_multimethod",
|
| 29 |
+
"_Function",
|
| 30 |
+
"BackendNotImplementedError",
|
| 31 |
+
"Dispatchable",
|
| 32 |
+
"wrap_single_convertor",
|
| 33 |
+
"wrap_single_convertor_instance",
|
| 34 |
+
"all_of_type",
|
| 35 |
+
"mark_as",
|
| 36 |
+
"set_state",
|
| 37 |
+
"get_state",
|
| 38 |
+
"reset_state",
|
| 39 |
+
"_BackendState",
|
| 40 |
+
"_SkipBackendContext",
|
| 41 |
+
"_SetBackendContext",
|
| 42 |
+
]
|
| 43 |
+
|
| 44 |
+
ArgumentExtractorType = typing.Callable[..., tuple["Dispatchable", ...]]
|
| 45 |
+
ArgumentReplacerType = typing.Callable[
|
| 46 |
+
[tuple, dict, tuple], tuple[tuple, dict]
|
| 47 |
+
]
|
| 48 |
+
|
| 49 |
+
def unpickle_function(mod_name, qname, self_):
|
| 50 |
+
import importlib
|
| 51 |
+
|
| 52 |
+
try:
|
| 53 |
+
module = importlib.import_module(mod_name)
|
| 54 |
+
qname = qname.split(".")
|
| 55 |
+
func = module
|
| 56 |
+
for q in qname:
|
| 57 |
+
func = getattr(func, q)
|
| 58 |
+
|
| 59 |
+
if self_ is not None:
|
| 60 |
+
func = types.MethodType(func, self_)
|
| 61 |
+
|
| 62 |
+
return func
|
| 63 |
+
except (ImportError, AttributeError) as e:
|
| 64 |
+
from pickle import UnpicklingError
|
| 65 |
+
|
| 66 |
+
raise UnpicklingError from e
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def pickle_function(func):
|
| 70 |
+
mod_name = getattr(func, "__module__", None)
|
| 71 |
+
qname = getattr(func, "__qualname__", None)
|
| 72 |
+
self_ = getattr(func, "__self__", None)
|
| 73 |
+
|
| 74 |
+
try:
|
| 75 |
+
test = unpickle_function(mod_name, qname, self_)
|
| 76 |
+
except pickle.UnpicklingError:
|
| 77 |
+
test = None
|
| 78 |
+
|
| 79 |
+
if test is not func:
|
| 80 |
+
raise pickle.PicklingError(
|
| 81 |
+
f"Can't pickle {func}: it's not the same object as {test}"
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
return unpickle_function, (mod_name, qname, self_)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def pickle_state(state):
|
| 88 |
+
return _uarray._BackendState._unpickle, state._pickle()
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def pickle_set_backend_context(ctx):
|
| 92 |
+
return _SetBackendContext, ctx._pickle()
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def pickle_skip_backend_context(ctx):
|
| 96 |
+
return _SkipBackendContext, ctx._pickle()
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
copyreg.pickle(_Function, pickle_function)
|
| 100 |
+
copyreg.pickle(_uarray._BackendState, pickle_state)
|
| 101 |
+
copyreg.pickle(_SetBackendContext, pickle_set_backend_context)
|
| 102 |
+
copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def get_state():
|
| 106 |
+
"""
|
| 107 |
+
Returns an opaque object containing the current state of all the backends.
|
| 108 |
+
|
| 109 |
+
Can be used for synchronization between threads/processes.
|
| 110 |
+
|
| 111 |
+
See Also
|
| 112 |
+
--------
|
| 113 |
+
set_state
|
| 114 |
+
Sets the state returned by this function.
|
| 115 |
+
"""
|
| 116 |
+
return _uarray.get_state()
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
@contextlib.contextmanager
|
| 120 |
+
def reset_state():
|
| 121 |
+
"""
|
| 122 |
+
Returns a context manager that resets all state once exited.
|
| 123 |
+
|
| 124 |
+
See Also
|
| 125 |
+
--------
|
| 126 |
+
set_state
|
| 127 |
+
Context manager that sets the backend state.
|
| 128 |
+
get_state
|
| 129 |
+
Gets a state to be set by this context manager.
|
| 130 |
+
"""
|
| 131 |
+
with set_state(get_state()):
|
| 132 |
+
yield
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
@contextlib.contextmanager
|
| 136 |
+
def set_state(state):
|
| 137 |
+
"""
|
| 138 |
+
A context manager that sets the state of the backends to one returned by :obj:`get_state`.
|
| 139 |
+
|
| 140 |
+
See Also
|
| 141 |
+
--------
|
| 142 |
+
get_state
|
| 143 |
+
Gets a state to be set by this context manager.
|
| 144 |
+
""" # noqa: E501
|
| 145 |
+
old_state = get_state()
|
| 146 |
+
_uarray.set_state(state)
|
| 147 |
+
try:
|
| 148 |
+
yield
|
| 149 |
+
finally:
|
| 150 |
+
_uarray.set_state(old_state, True)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def create_multimethod(*args, **kwargs):
|
| 154 |
+
"""
|
| 155 |
+
Creates a decorator for generating multimethods.
|
| 156 |
+
|
| 157 |
+
This function creates a decorator that can be used with an argument
|
| 158 |
+
extractor in order to generate a multimethod. Other than for the
|
| 159 |
+
argument extractor, all arguments are passed on to
|
| 160 |
+
:obj:`generate_multimethod`.
|
| 161 |
+
|
| 162 |
+
See Also
|
| 163 |
+
--------
|
| 164 |
+
generate_multimethod
|
| 165 |
+
Generates a multimethod.
|
| 166 |
+
"""
|
| 167 |
+
|
| 168 |
+
def wrapper(a):
|
| 169 |
+
return generate_multimethod(a, *args, **kwargs)
|
| 170 |
+
|
| 171 |
+
return wrapper
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def generate_multimethod(
|
| 175 |
+
argument_extractor: ArgumentExtractorType,
|
| 176 |
+
argument_replacer: ArgumentReplacerType,
|
| 177 |
+
domain: str,
|
| 178 |
+
default: typing.Callable | None = None,
|
| 179 |
+
):
|
| 180 |
+
"""
|
| 181 |
+
Generates a multimethod.
|
| 182 |
+
|
| 183 |
+
Parameters
|
| 184 |
+
----------
|
| 185 |
+
argument_extractor : ArgumentExtractorType
|
| 186 |
+
A callable which extracts the dispatchable arguments. Extracted arguments
|
| 187 |
+
should be marked by the :obj:`Dispatchable` class. It has the same signature
|
| 188 |
+
as the desired multimethod.
|
| 189 |
+
argument_replacer : ArgumentReplacerType
|
| 190 |
+
A callable with the signature (args, kwargs, dispatchables), which should also
|
| 191 |
+
return an (args, kwargs) pair with the dispatchables replaced inside the
|
| 192 |
+
args/kwargs.
|
| 193 |
+
domain : str
|
| 194 |
+
A string value indicating the domain of this multimethod.
|
| 195 |
+
default: Optional[Callable], optional
|
| 196 |
+
The default implementation of this multimethod, where ``None`` (the default)
|
| 197 |
+
specifies there is no default implementation.
|
| 198 |
+
|
| 199 |
+
Examples
|
| 200 |
+
--------
|
| 201 |
+
In this example, ``a`` is to be dispatched over, so we return it, while marking it
|
| 202 |
+
as an ``int``.
|
| 203 |
+
The trailing comma is needed because the args have to be returned as an iterable.
|
| 204 |
+
|
| 205 |
+
>>> def override_me(a, b):
|
| 206 |
+
... return Dispatchable(a, int),
|
| 207 |
+
|
| 208 |
+
Next, we define the argument replacer that replaces the dispatchables inside
|
| 209 |
+
args/kwargs with the supplied ones.
|
| 210 |
+
|
| 211 |
+
>>> def override_replacer(args, kwargs, dispatchables):
|
| 212 |
+
... return (dispatchables[0], args[1]), {}
|
| 213 |
+
|
| 214 |
+
Next, we define the multimethod.
|
| 215 |
+
|
| 216 |
+
>>> overridden_me = generate_multimethod(
|
| 217 |
+
... override_me, override_replacer, "ua_examples"
|
| 218 |
+
... )
|
| 219 |
+
|
| 220 |
+
Notice that there's no default implementation, unless you supply one.
|
| 221 |
+
|
| 222 |
+
>>> overridden_me(1, "a")
|
| 223 |
+
Traceback (most recent call last):
|
| 224 |
+
...
|
| 225 |
+
uarray.BackendNotImplementedError: ...
|
| 226 |
+
|
| 227 |
+
>>> overridden_me2 = generate_multimethod(
|
| 228 |
+
... override_me, override_replacer, "ua_examples", default=lambda x, y: (x, y)
|
| 229 |
+
... )
|
| 230 |
+
>>> overridden_me2(1, "a")
|
| 231 |
+
(1, 'a')
|
| 232 |
+
|
| 233 |
+
See Also
|
| 234 |
+
--------
|
| 235 |
+
uarray
|
| 236 |
+
See the module documentation for how to override the method by creating
|
| 237 |
+
backends.
|
| 238 |
+
"""
|
| 239 |
+
kw_defaults, arg_defaults, opts = get_defaults(argument_extractor)
|
| 240 |
+
ua_func = _Function(
|
| 241 |
+
argument_extractor,
|
| 242 |
+
argument_replacer,
|
| 243 |
+
domain,
|
| 244 |
+
arg_defaults,
|
| 245 |
+
kw_defaults,
|
| 246 |
+
default,
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
return functools.update_wrapper(ua_func, argument_extractor)
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def set_backend(backend, coerce=False, only=False):
|
| 253 |
+
"""
|
| 254 |
+
A context manager that sets the preferred backend.
|
| 255 |
+
|
| 256 |
+
Parameters
|
| 257 |
+
----------
|
| 258 |
+
backend
|
| 259 |
+
The backend to set.
|
| 260 |
+
coerce
|
| 261 |
+
Whether or not to coerce to a specific backend's types. Implies ``only``.
|
| 262 |
+
only
|
| 263 |
+
Whether or not this should be the last backend to try.
|
| 264 |
+
|
| 265 |
+
See Also
|
| 266 |
+
--------
|
| 267 |
+
skip_backend: A context manager that allows skipping of backends.
|
| 268 |
+
set_global_backend: Set a single, global backend for a domain.
|
| 269 |
+
"""
|
| 270 |
+
tid = threading.get_native_id()
|
| 271 |
+
try:
|
| 272 |
+
return backend.__ua_cache__[tid, "set", coerce, only]
|
| 273 |
+
except AttributeError:
|
| 274 |
+
backend.__ua_cache__ = {}
|
| 275 |
+
except KeyError:
|
| 276 |
+
pass
|
| 277 |
+
|
| 278 |
+
ctx = _SetBackendContext(backend, coerce, only)
|
| 279 |
+
backend.__ua_cache__[tid, "set", coerce, only] = ctx
|
| 280 |
+
return ctx
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def skip_backend(backend):
|
| 284 |
+
"""
|
| 285 |
+
A context manager that allows one to skip a given backend from processing
|
| 286 |
+
entirely. This allows one to use another backend's code in a library that
|
| 287 |
+
is also a consumer of the same backend.
|
| 288 |
+
|
| 289 |
+
Parameters
|
| 290 |
+
----------
|
| 291 |
+
backend
|
| 292 |
+
The backend to skip.
|
| 293 |
+
|
| 294 |
+
See Also
|
| 295 |
+
--------
|
| 296 |
+
set_backend: A context manager that allows setting of backends.
|
| 297 |
+
set_global_backend: Set a single, global backend for a domain.
|
| 298 |
+
"""
|
| 299 |
+
tid = threading.get_native_id()
|
| 300 |
+
try:
|
| 301 |
+
return backend.__ua_cache__[tid, "skip"]
|
| 302 |
+
except AttributeError:
|
| 303 |
+
backend.__ua_cache__ = {}
|
| 304 |
+
except KeyError:
|
| 305 |
+
pass
|
| 306 |
+
|
| 307 |
+
ctx = _SkipBackendContext(backend)
|
| 308 |
+
backend.__ua_cache__[tid, "skip"] = ctx
|
| 309 |
+
return ctx
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def get_defaults(f):
|
| 313 |
+
sig = inspect.signature(f)
|
| 314 |
+
kw_defaults = {}
|
| 315 |
+
arg_defaults = []
|
| 316 |
+
opts = set()
|
| 317 |
+
for k, v in sig.parameters.items():
|
| 318 |
+
if v.default is not inspect.Parameter.empty:
|
| 319 |
+
kw_defaults[k] = v.default
|
| 320 |
+
if v.kind in (
|
| 321 |
+
inspect.Parameter.POSITIONAL_ONLY,
|
| 322 |
+
inspect.Parameter.POSITIONAL_OR_KEYWORD,
|
| 323 |
+
):
|
| 324 |
+
arg_defaults.append(v.default)
|
| 325 |
+
opts.add(k)
|
| 326 |
+
|
| 327 |
+
return kw_defaults, tuple(arg_defaults), opts
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def set_global_backend(backend, coerce=False, only=False, *, try_last=False):
|
| 331 |
+
"""
|
| 332 |
+
This utility method replaces the default backend for permanent use. It
|
| 333 |
+
will be tried in the list of backends automatically, unless the
|
| 334 |
+
``only`` flag is set on a backend. This will be the first tried
|
| 335 |
+
backend outside the :obj:`set_backend` context manager.
|
| 336 |
+
|
| 337 |
+
Note that this method is not thread-safe.
|
| 338 |
+
|
| 339 |
+
.. warning::
|
| 340 |
+
We caution library authors against using this function in
|
| 341 |
+
their code. We do *not* support this use-case. This function
|
| 342 |
+
is meant to be used only by users themselves, or by a reference
|
| 343 |
+
implementation, if one exists.
|
| 344 |
+
|
| 345 |
+
Parameters
|
| 346 |
+
----------
|
| 347 |
+
backend
|
| 348 |
+
The backend to register.
|
| 349 |
+
coerce : bool
|
| 350 |
+
Whether to coerce input types when trying this backend.
|
| 351 |
+
only : bool
|
| 352 |
+
If ``True``, no more backends will be tried if this fails.
|
| 353 |
+
Implied by ``coerce=True``.
|
| 354 |
+
try_last : bool
|
| 355 |
+
If ``True``, the global backend is tried after registered backends.
|
| 356 |
+
|
| 357 |
+
See Also
|
| 358 |
+
--------
|
| 359 |
+
set_backend: A context manager that allows setting of backends.
|
| 360 |
+
skip_backend: A context manager that allows skipping of backends.
|
| 361 |
+
"""
|
| 362 |
+
_uarray.set_global_backend(backend, coerce, only, try_last)
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
def register_backend(backend):
|
| 366 |
+
"""
|
| 367 |
+
This utility method sets registers backend for permanent use. It
|
| 368 |
+
will be tried in the list of backends automatically, unless the
|
| 369 |
+
``only`` flag is set on a backend.
|
| 370 |
+
|
| 371 |
+
Note that this method is not thread-safe.
|
| 372 |
+
|
| 373 |
+
Parameters
|
| 374 |
+
----------
|
| 375 |
+
backend
|
| 376 |
+
The backend to register.
|
| 377 |
+
"""
|
| 378 |
+
_uarray.register_backend(backend)
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def clear_backends(domain, registered=True, globals=False):
|
| 382 |
+
"""
|
| 383 |
+
This utility method clears registered backends.
|
| 384 |
+
|
| 385 |
+
.. warning::
|
| 386 |
+
We caution library authors against using this function in
|
| 387 |
+
their code. We do *not* support this use-case. This function
|
| 388 |
+
is meant to be used only by users themselves.
|
| 389 |
+
|
| 390 |
+
.. warning::
|
| 391 |
+
Do NOT use this method inside a multimethod call, or the
|
| 392 |
+
program is likely to crash.
|
| 393 |
+
|
| 394 |
+
Parameters
|
| 395 |
+
----------
|
| 396 |
+
domain : Optional[str]
|
| 397 |
+
The domain for which to de-register backends. ``None`` means
|
| 398 |
+
de-register for all domains.
|
| 399 |
+
registered : bool
|
| 400 |
+
Whether or not to clear registered backends. See :obj:`register_backend`.
|
| 401 |
+
globals : bool
|
| 402 |
+
Whether or not to clear global backends. See :obj:`set_global_backend`.
|
| 403 |
+
|
| 404 |
+
See Also
|
| 405 |
+
--------
|
| 406 |
+
register_backend : Register a backend globally.
|
| 407 |
+
set_global_backend : Set a global backend.
|
| 408 |
+
"""
|
| 409 |
+
_uarray.clear_backends(domain, registered, globals)
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
class Dispatchable:
|
| 413 |
+
"""
|
| 414 |
+
A utility class which marks an argument with a specific dispatch type.
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
Attributes
|
| 418 |
+
----------
|
| 419 |
+
value
|
| 420 |
+
The value of the Dispatchable.
|
| 421 |
+
|
| 422 |
+
type
|
| 423 |
+
The type of the Dispatchable.
|
| 424 |
+
|
| 425 |
+
Examples
|
| 426 |
+
--------
|
| 427 |
+
>>> x = Dispatchable(1, str)
|
| 428 |
+
>>> x
|
| 429 |
+
<Dispatchable: type=<class 'str'>, value=1>
|
| 430 |
+
|
| 431 |
+
See Also
|
| 432 |
+
--------
|
| 433 |
+
all_of_type
|
| 434 |
+
Marks all unmarked parameters of a function.
|
| 435 |
+
|
| 436 |
+
mark_as
|
| 437 |
+
Allows one to create a utility function to mark as a given type.
|
| 438 |
+
"""
|
| 439 |
+
|
| 440 |
+
def __init__(self, value, dispatch_type, coercible=True):
|
| 441 |
+
self.value = value
|
| 442 |
+
self.type = dispatch_type
|
| 443 |
+
self.coercible = coercible
|
| 444 |
+
|
| 445 |
+
def __getitem__(self, index):
|
| 446 |
+
return (self.type, self.value)[index]
|
| 447 |
+
|
| 448 |
+
def __str__(self):
|
| 449 |
+
return f"<{type(self).__name__}: type={self.type!r}, value={self.value!r}>"
|
| 450 |
+
|
| 451 |
+
__repr__ = __str__
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def mark_as(dispatch_type):
|
| 455 |
+
"""
|
| 456 |
+
Creates a utility function to mark something as a specific type.
|
| 457 |
+
|
| 458 |
+
Examples
|
| 459 |
+
--------
|
| 460 |
+
>>> mark_int = mark_as(int)
|
| 461 |
+
>>> mark_int(1)
|
| 462 |
+
<Dispatchable: type=<class 'int'>, value=1>
|
| 463 |
+
"""
|
| 464 |
+
return functools.partial(Dispatchable, dispatch_type=dispatch_type)
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
def all_of_type(arg_type):
|
| 468 |
+
"""
|
| 469 |
+
Marks all unmarked arguments as a given type.
|
| 470 |
+
|
| 471 |
+
Examples
|
| 472 |
+
--------
|
| 473 |
+
>>> @all_of_type(str)
|
| 474 |
+
... def f(a, b):
|
| 475 |
+
... return a, Dispatchable(b, int)
|
| 476 |
+
>>> f('a', 1)
|
| 477 |
+
(<Dispatchable: type=<class 'str'>, value='a'>,
|
| 478 |
+
<Dispatchable: type=<class 'int'>, value=1>)
|
| 479 |
+
"""
|
| 480 |
+
|
| 481 |
+
def outer(func):
|
| 482 |
+
@functools.wraps(func)
|
| 483 |
+
def inner(*args, **kwargs):
|
| 484 |
+
extracted_args = func(*args, **kwargs)
|
| 485 |
+
return tuple(
|
| 486 |
+
Dispatchable(arg, arg_type)
|
| 487 |
+
if not isinstance(arg, Dispatchable)
|
| 488 |
+
else arg
|
| 489 |
+
for arg in extracted_args
|
| 490 |
+
)
|
| 491 |
+
|
| 492 |
+
return inner
|
| 493 |
+
|
| 494 |
+
return outer
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
def wrap_single_convertor(convert_single):
|
| 498 |
+
"""
|
| 499 |
+
Wraps a ``__ua_convert__`` defined for a single element to all elements.
|
| 500 |
+
If any of them return ``NotImplemented``, the operation is assumed to be
|
| 501 |
+
undefined.
|
| 502 |
+
|
| 503 |
+
Accepts a signature of (value, type, coerce).
|
| 504 |
+
"""
|
| 505 |
+
|
| 506 |
+
@functools.wraps(convert_single)
|
| 507 |
+
def __ua_convert__(dispatchables, coerce):
|
| 508 |
+
converted = []
|
| 509 |
+
for d in dispatchables:
|
| 510 |
+
c = convert_single(d.value, d.type, coerce and d.coercible)
|
| 511 |
+
|
| 512 |
+
if c is NotImplemented:
|
| 513 |
+
return NotImplemented
|
| 514 |
+
|
| 515 |
+
converted.append(c)
|
| 516 |
+
|
| 517 |
+
return converted
|
| 518 |
+
|
| 519 |
+
return __ua_convert__
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
def wrap_single_convertor_instance(convert_single):
|
| 523 |
+
"""
|
| 524 |
+
Wraps a ``__ua_convert__`` defined for a single element to all elements.
|
| 525 |
+
If any of them return ``NotImplemented``, the operation is assumed to be
|
| 526 |
+
undefined.
|
| 527 |
+
|
| 528 |
+
Accepts a signature of (value, type, coerce).
|
| 529 |
+
"""
|
| 530 |
+
|
| 531 |
+
@functools.wraps(convert_single)
|
| 532 |
+
def __ua_convert__(self, dispatchables, coerce):
|
| 533 |
+
converted = []
|
| 534 |
+
for d in dispatchables:
|
| 535 |
+
c = convert_single(self, d.value, d.type, coerce and d.coercible)
|
| 536 |
+
|
| 537 |
+
if c is NotImplemented:
|
| 538 |
+
return NotImplemented
|
| 539 |
+
|
| 540 |
+
converted.append(c)
|
| 541 |
+
|
| 542 |
+
return converted
|
| 543 |
+
|
| 544 |
+
return __ua_convert__
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False):
|
| 548 |
+
"""Set the backend to the first active backend that supports ``value``
|
| 549 |
+
|
| 550 |
+
This is useful for functions that call multimethods without any dispatchable
|
| 551 |
+
arguments. You can use :func:`determine_backend` to ensure the same backend
|
| 552 |
+
is used everywhere in a block of multimethod calls.
|
| 553 |
+
|
| 554 |
+
Parameters
|
| 555 |
+
----------
|
| 556 |
+
value
|
| 557 |
+
The value being tested
|
| 558 |
+
dispatch_type
|
| 559 |
+
The dispatch type associated with ``value``, aka
|
| 560 |
+
":ref:`marking <MarkingGlossary>`".
|
| 561 |
+
domain: string
|
| 562 |
+
The domain to query for backends and set.
|
| 563 |
+
coerce: bool
|
| 564 |
+
Whether or not to allow coercion to the backend's types. Implies ``only``.
|
| 565 |
+
only: bool
|
| 566 |
+
Whether or not this should be the last backend to try.
|
| 567 |
+
|
| 568 |
+
See Also
|
| 569 |
+
--------
|
| 570 |
+
set_backend: For when you know which backend to set
|
| 571 |
+
|
| 572 |
+
Notes
|
| 573 |
+
-----
|
| 574 |
+
|
| 575 |
+
Support is determined by the ``__ua_convert__`` protocol. Backends not
|
| 576 |
+
supporting the type must return ``NotImplemented`` from their
|
| 577 |
+
``__ua_convert__`` if they don't support input of that type.
|
| 578 |
+
|
| 579 |
+
Examples
|
| 580 |
+
--------
|
| 581 |
+
|
| 582 |
+
Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting
|
| 583 |
+
different types, ``TypeA`` and ``TypeB``. Neither supporting the other type:
|
| 584 |
+
|
| 585 |
+
>>> with ua.set_backend(ex.BackendA):
|
| 586 |
+
... ex.call_multimethod(ex.TypeB(), ex.TypeB())
|
| 587 |
+
Traceback (most recent call last):
|
| 588 |
+
...
|
| 589 |
+
uarray.BackendNotImplementedError: ...
|
| 590 |
+
|
| 591 |
+
Now consider a multimethod that creates a new object of ``TypeA``, or
|
| 592 |
+
``TypeB`` depending on the active backend.
|
| 593 |
+
|
| 594 |
+
>>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
|
| 595 |
+
... res = ex.creation_multimethod()
|
| 596 |
+
... ex.call_multimethod(res, ex.TypeA())
|
| 597 |
+
Traceback (most recent call last):
|
| 598 |
+
...
|
| 599 |
+
uarray.BackendNotImplementedError: ...
|
| 600 |
+
|
| 601 |
+
``res`` is an object of ``TypeB`` because ``BackendB`` is set in the
|
| 602 |
+
innermost with statement. So, ``call_multimethod`` fails since the types
|
| 603 |
+
don't match.
|
| 604 |
+
|
| 605 |
+
Instead, we need to first find a backend suitable for all of our objects.
|
| 606 |
+
|
| 607 |
+
>>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
|
| 608 |
+
... x = ex.TypeA()
|
| 609 |
+
... with ua.determine_backend(x, "mark", domain="ua_examples"):
|
| 610 |
+
... res = ex.creation_multimethod()
|
| 611 |
+
... ex.call_multimethod(res, x)
|
| 612 |
+
TypeA
|
| 613 |
+
|
| 614 |
+
"""
|
| 615 |
+
dispatchables = (Dispatchable(value, dispatch_type, coerce),)
|
| 616 |
+
backend = _uarray.determine_backend(domain, dispatchables, coerce)
|
| 617 |
+
|
| 618 |
+
return set_backend(backend, coerce=coerce, only=only)
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
def determine_backend_multi(
|
| 622 |
+
dispatchables, *, domain, only=True, coerce=False, **kwargs
|
| 623 |
+
):
|
| 624 |
+
"""Set a backend supporting all ``dispatchables``
|
| 625 |
+
|
| 626 |
+
This is useful for functions that call multimethods without any dispatchable
|
| 627 |
+
arguments. You can use :func:`determine_backend_multi` to ensure the same
|
| 628 |
+
backend is used everywhere in a block of multimethod calls involving
|
| 629 |
+
multiple arrays.
|
| 630 |
+
|
| 631 |
+
Parameters
|
| 632 |
+
----------
|
| 633 |
+
dispatchables: Sequence[Union[uarray.Dispatchable, Any]]
|
| 634 |
+
The dispatchables that must be supported
|
| 635 |
+
domain: string
|
| 636 |
+
The domain to query for backends and set.
|
| 637 |
+
coerce: bool
|
| 638 |
+
Whether or not to allow coercion to the backend's types. Implies ``only``.
|
| 639 |
+
only: bool
|
| 640 |
+
Whether or not this should be the last backend to try.
|
| 641 |
+
dispatch_type: Optional[Any]
|
| 642 |
+
The default dispatch type associated with ``dispatchables``, aka
|
| 643 |
+
":ref:`marking <MarkingGlossary>`".
|
| 644 |
+
|
| 645 |
+
See Also
|
| 646 |
+
--------
|
| 647 |
+
determine_backend: For a single dispatch value
|
| 648 |
+
set_backend: For when you know which backend to set
|
| 649 |
+
|
| 650 |
+
Notes
|
| 651 |
+
-----
|
| 652 |
+
|
| 653 |
+
Support is determined by the ``__ua_convert__`` protocol. Backends not
|
| 654 |
+
supporting the type must return ``NotImplemented`` from their
|
| 655 |
+
``__ua_convert__`` if they don't support input of that type.
|
| 656 |
+
|
| 657 |
+
Examples
|
| 658 |
+
--------
|
| 659 |
+
|
| 660 |
+
:func:`determine_backend` allows the backend to be set from a single
|
| 661 |
+
object. :func:`determine_backend_multi` allows multiple objects to be
|
| 662 |
+
checked simultaneously for support in the backend. Suppose we have a
|
| 663 |
+
``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call,
|
| 664 |
+
and a ``BackendBC`` that doesn't support ``TypeA``.
|
| 665 |
+
|
| 666 |
+
>>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
|
| 667 |
+
... a, b = ex.TypeA(), ex.TypeB()
|
| 668 |
+
... with ua.determine_backend_multi(
|
| 669 |
+
... [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")],
|
| 670 |
+
... domain="ua_examples"
|
| 671 |
+
... ):
|
| 672 |
+
... res = ex.creation_multimethod()
|
| 673 |
+
... ex.call_multimethod(res, a, b)
|
| 674 |
+
TypeA
|
| 675 |
+
|
| 676 |
+
This won't call ``BackendBC`` because it doesn't support ``TypeA``.
|
| 677 |
+
|
| 678 |
+
We can also use leave out the ``ua.Dispatchable`` if we specify the
|
| 679 |
+
default ``dispatch_type`` for the ``dispatchables`` argument.
|
| 680 |
+
|
| 681 |
+
>>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
|
| 682 |
+
... a, b = ex.TypeA(), ex.TypeB()
|
| 683 |
+
... with ua.determine_backend_multi(
|
| 684 |
+
... [a, b], dispatch_type="mark", domain="ua_examples"
|
| 685 |
+
... ):
|
| 686 |
+
... res = ex.creation_multimethod()
|
| 687 |
+
... ex.call_multimethod(res, a, b)
|
| 688 |
+
TypeA
|
| 689 |
+
|
| 690 |
+
"""
|
| 691 |
+
if "dispatch_type" in kwargs:
|
| 692 |
+
disp_type = kwargs.pop("dispatch_type")
|
| 693 |
+
dispatchables = tuple(
|
| 694 |
+
d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type)
|
| 695 |
+
for d in dispatchables
|
| 696 |
+
)
|
| 697 |
+
else:
|
| 698 |
+
dispatchables = tuple(dispatchables)
|
| 699 |
+
if not all(isinstance(d, Dispatchable) for d in dispatchables):
|
| 700 |
+
raise TypeError("dispatchables must be instances of uarray.Dispatchable")
|
| 701 |
+
|
| 702 |
+
if len(kwargs) != 0:
|
| 703 |
+
raise TypeError(f"Received unexpected keyword arguments: {kwargs}")
|
| 704 |
+
|
| 705 |
+
backend = _uarray.determine_backend(domain, dispatchables, coerce)
|
| 706 |
+
|
| 707 |
+
return set_backend(backend, coerce=coerce, only=only)
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/__init__.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NumPy Array API compatibility library
|
| 3 |
+
|
| 4 |
+
This is a small wrapper around NumPy, CuPy, JAX, sparse and others that are
|
| 5 |
+
compatible with the Array API standard https://data-apis.org/array-api/latest/.
|
| 6 |
+
See also NEP 47 https://numpy.org/neps/nep-0047-array-api-standard.html.
|
| 7 |
+
|
| 8 |
+
Unlike array_api_strict, this is not a strict minimal implementation of the
|
| 9 |
+
Array API, but rather just an extension of the main NumPy namespace with
|
| 10 |
+
changes needed to be compliant with the Array API. See
|
| 11 |
+
https://numpy.org/doc/stable/reference/array_api.html for a full list of
|
| 12 |
+
changes. In particular, unlike array_api_strict, this package does not use a
|
| 13 |
+
separate Array object, but rather just uses numpy.ndarray directly.
|
| 14 |
+
|
| 15 |
+
Library authors using the Array API may wish to test against array_api_strict
|
| 16 |
+
to ensure they are not using functionality outside of the standard, but prefer
|
| 17 |
+
this implementation for the default when working with NumPy arrays.
|
| 18 |
+
|
| 19 |
+
"""
|
| 20 |
+
__version__ = '1.13.0'
|
| 21 |
+
|
| 22 |
+
from .common import * # noqa: F401, F403
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-312.pyc
ADDED
|
Binary file (3.28 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/_internal.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Internal helpers
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import importlib
|
| 6 |
+
from collections.abc import Callable
|
| 7 |
+
from functools import wraps
|
| 8 |
+
from inspect import signature
|
| 9 |
+
from types import ModuleType
|
| 10 |
+
from typing import TypeVar
|
| 11 |
+
|
| 12 |
+
_T = TypeVar("_T")
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def get_xp(xp: ModuleType) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
|
| 16 |
+
"""
|
| 17 |
+
Decorator to automatically replace xp with the corresponding array module.
|
| 18 |
+
|
| 19 |
+
Use like
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
|
| 23 |
+
@get_xp(np)
|
| 24 |
+
def func(x, /, xp, kwarg=None):
|
| 25 |
+
return xp.func(x, kwarg=kwarg)
|
| 26 |
+
|
| 27 |
+
Note that xp must be a keyword argument and come after all non-keyword
|
| 28 |
+
arguments.
|
| 29 |
+
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
def inner(f: Callable[..., _T], /) -> Callable[..., _T]:
|
| 33 |
+
@wraps(f)
|
| 34 |
+
def wrapped_f(*args: object, **kwargs: object) -> object:
|
| 35 |
+
return f(*args, xp=xp, **kwargs)
|
| 36 |
+
|
| 37 |
+
sig = signature(f)
|
| 38 |
+
new_sig = sig.replace(
|
| 39 |
+
parameters=[par for i, par in sig.parameters.items() if i != "xp"]
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
if wrapped_f.__doc__ is None:
|
| 43 |
+
wrapped_f.__doc__ = f"""\
|
| 44 |
+
Array API compatibility wrapper for {f.__name__}.
|
| 45 |
+
|
| 46 |
+
See the corresponding documentation in NumPy/CuPy and/or the array API
|
| 47 |
+
specification for more details.
|
| 48 |
+
|
| 49 |
+
"""
|
| 50 |
+
wrapped_f.__signature__ = new_sig # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
|
| 51 |
+
return wrapped_f # type: ignore[return-value] # pyright: ignore[reportReturnType]
|
| 52 |
+
|
| 53 |
+
return inner
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def clone_module(mod_name: str, globals_: dict[str, object]) -> list[str]:
|
| 57 |
+
"""Import everything from module, updating globals().
|
| 58 |
+
Returns __all__.
|
| 59 |
+
"""
|
| 60 |
+
mod = importlib.import_module(mod_name)
|
| 61 |
+
# Neither of these two methods is sufficient by itself,
|
| 62 |
+
# depending on various idiosyncrasies of the libraries we're wrapping.
|
| 63 |
+
objs = {}
|
| 64 |
+
exec(f"from {mod.__name__} import *", objs)
|
| 65 |
+
|
| 66 |
+
for n in dir(mod):
|
| 67 |
+
if not n.startswith("_") and hasattr(mod, n):
|
| 68 |
+
objs[n] = getattr(mod, n)
|
| 69 |
+
|
| 70 |
+
globals_.update(objs)
|
| 71 |
+
return list(objs)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
__all__ = ["get_xp", "clone_module"]
|
| 75 |
+
|
| 76 |
+
def __dir__() -> list[str]:
|
| 77 |
+
return __all__
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from ._helpers import * # noqa: F403
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (256 Bytes). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-312.pyc
ADDED
|
Binary file (21.2 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_fft.cpython-312.pyc
ADDED
|
Binary file (7.46 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-312.pyc
ADDED
|
Binary file (32.4 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-312.pyc
ADDED
|
Binary file (9.65 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-312.pyc
ADDED
|
Binary file (6.02 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_aliases.py
ADDED
|
@@ -0,0 +1,702 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
These are functions that are just aliases of existing functions in NumPy.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import inspect
|
| 8 |
+
from collections.abc import Sequence
|
| 9 |
+
from typing import TYPE_CHECKING, Any, NamedTuple, cast
|
| 10 |
+
|
| 11 |
+
from ._helpers import _check_device, array_namespace
|
| 12 |
+
from ._helpers import device as _get_device
|
| 13 |
+
from ._helpers import is_cupy_namespace
|
| 14 |
+
from ._typing import Array, Device, DType, Namespace
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
# TODO: import from typing (requires Python >=3.13)
|
| 18 |
+
from typing_extensions import TypeIs
|
| 19 |
+
|
| 20 |
+
# These functions are modified from the NumPy versions.
|
| 21 |
+
|
| 22 |
+
# Creation functions add the device keyword (which does nothing for NumPy and Dask)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def arange(
|
| 26 |
+
start: float,
|
| 27 |
+
/,
|
| 28 |
+
stop: float | None = None,
|
| 29 |
+
step: float = 1,
|
| 30 |
+
*,
|
| 31 |
+
xp: Namespace,
|
| 32 |
+
dtype: DType | None = None,
|
| 33 |
+
device: Device | None = None,
|
| 34 |
+
**kwargs: object,
|
| 35 |
+
) -> Array:
|
| 36 |
+
_check_device(xp, device)
|
| 37 |
+
return xp.arange(start, stop=stop, step=step, dtype=dtype, **kwargs)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def empty(
|
| 41 |
+
shape: int | tuple[int, ...],
|
| 42 |
+
xp: Namespace,
|
| 43 |
+
*,
|
| 44 |
+
dtype: DType | None = None,
|
| 45 |
+
device: Device | None = None,
|
| 46 |
+
**kwargs: object,
|
| 47 |
+
) -> Array:
|
| 48 |
+
_check_device(xp, device)
|
| 49 |
+
return xp.empty(shape, dtype=dtype, **kwargs)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def empty_like(
|
| 53 |
+
x: Array,
|
| 54 |
+
/,
|
| 55 |
+
xp: Namespace,
|
| 56 |
+
*,
|
| 57 |
+
dtype: DType | None = None,
|
| 58 |
+
device: Device | None = None,
|
| 59 |
+
**kwargs: object,
|
| 60 |
+
) -> Array:
|
| 61 |
+
_check_device(xp, device)
|
| 62 |
+
return xp.empty_like(x, dtype=dtype, **kwargs)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def eye(
|
| 66 |
+
n_rows: int,
|
| 67 |
+
n_cols: int | None = None,
|
| 68 |
+
/,
|
| 69 |
+
*,
|
| 70 |
+
xp: Namespace,
|
| 71 |
+
k: int = 0,
|
| 72 |
+
dtype: DType | None = None,
|
| 73 |
+
device: Device | None = None,
|
| 74 |
+
**kwargs: object,
|
| 75 |
+
) -> Array:
|
| 76 |
+
_check_device(xp, device)
|
| 77 |
+
return xp.eye(n_rows, M=n_cols, k=k, dtype=dtype, **kwargs)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def full(
|
| 81 |
+
shape: int | tuple[int, ...],
|
| 82 |
+
fill_value: complex,
|
| 83 |
+
xp: Namespace,
|
| 84 |
+
*,
|
| 85 |
+
dtype: DType | None = None,
|
| 86 |
+
device: Device | None = None,
|
| 87 |
+
**kwargs: object,
|
| 88 |
+
) -> Array:
|
| 89 |
+
_check_device(xp, device)
|
| 90 |
+
return xp.full(shape, fill_value, dtype=dtype, **kwargs)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def full_like(
|
| 94 |
+
x: Array,
|
| 95 |
+
/,
|
| 96 |
+
fill_value: complex,
|
| 97 |
+
*,
|
| 98 |
+
xp: Namespace,
|
| 99 |
+
dtype: DType | None = None,
|
| 100 |
+
device: Device | None = None,
|
| 101 |
+
**kwargs: object,
|
| 102 |
+
) -> Array:
|
| 103 |
+
_check_device(xp, device)
|
| 104 |
+
return xp.full_like(x, fill_value, dtype=dtype, **kwargs)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def linspace(
|
| 108 |
+
start: float,
|
| 109 |
+
stop: float,
|
| 110 |
+
/,
|
| 111 |
+
num: int,
|
| 112 |
+
*,
|
| 113 |
+
xp: Namespace,
|
| 114 |
+
dtype: DType | None = None,
|
| 115 |
+
device: Device | None = None,
|
| 116 |
+
endpoint: bool = True,
|
| 117 |
+
**kwargs: object,
|
| 118 |
+
) -> Array:
|
| 119 |
+
_check_device(xp, device)
|
| 120 |
+
return xp.linspace(start, stop, num, dtype=dtype, endpoint=endpoint, **kwargs)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def ones(
|
| 124 |
+
shape: int | tuple[int, ...],
|
| 125 |
+
xp: Namespace,
|
| 126 |
+
*,
|
| 127 |
+
dtype: DType | None = None,
|
| 128 |
+
device: Device | None = None,
|
| 129 |
+
**kwargs: object,
|
| 130 |
+
) -> Array:
|
| 131 |
+
_check_device(xp, device)
|
| 132 |
+
return xp.ones(shape, dtype=dtype, **kwargs)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def ones_like(
|
| 136 |
+
x: Array,
|
| 137 |
+
/,
|
| 138 |
+
xp: Namespace,
|
| 139 |
+
*,
|
| 140 |
+
dtype: DType | None = None,
|
| 141 |
+
device: Device | None = None,
|
| 142 |
+
**kwargs: object,
|
| 143 |
+
) -> Array:
|
| 144 |
+
_check_device(xp, device)
|
| 145 |
+
return xp.ones_like(x, dtype=dtype, **kwargs)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def zeros(
|
| 149 |
+
shape: int | tuple[int, ...],
|
| 150 |
+
xp: Namespace,
|
| 151 |
+
*,
|
| 152 |
+
dtype: DType | None = None,
|
| 153 |
+
device: Device | None = None,
|
| 154 |
+
**kwargs: object,
|
| 155 |
+
) -> Array:
|
| 156 |
+
_check_device(xp, device)
|
| 157 |
+
return xp.zeros(shape, dtype=dtype, **kwargs)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def zeros_like(
|
| 161 |
+
x: Array,
|
| 162 |
+
/,
|
| 163 |
+
xp: Namespace,
|
| 164 |
+
*,
|
| 165 |
+
dtype: DType | None = None,
|
| 166 |
+
device: Device | None = None,
|
| 167 |
+
**kwargs: object,
|
| 168 |
+
) -> Array:
|
| 169 |
+
_check_device(xp, device)
|
| 170 |
+
return xp.zeros_like(x, dtype=dtype, **kwargs)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# np.unique() is split into four functions in the array API:
|
| 174 |
+
# unique_all, unique_counts, unique_inverse, and unique_values (this is done
|
| 175 |
+
# to remove polymorphic return types).
|
| 176 |
+
|
| 177 |
+
# The functions here return namedtuples (np.unique() returns a normal
|
| 178 |
+
# tuple).
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
# Note that these named tuples aren't actually part of the standard namespace,
|
| 182 |
+
# but I don't see any issue with exporting the names here regardless.
|
| 183 |
+
class UniqueAllResult(NamedTuple):
|
| 184 |
+
values: Array
|
| 185 |
+
indices: Array
|
| 186 |
+
inverse_indices: Array
|
| 187 |
+
counts: Array
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class UniqueCountsResult(NamedTuple):
|
| 191 |
+
values: Array
|
| 192 |
+
counts: Array
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
class UniqueInverseResult(NamedTuple):
|
| 196 |
+
values: Array
|
| 197 |
+
inverse_indices: Array
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _unique_kwargs(xp: Namespace) -> dict[str, bool]:
|
| 201 |
+
# Older versions of NumPy and CuPy do not have equal_nan. Rather than
|
| 202 |
+
# trying to parse version numbers, just check if equal_nan is in the
|
| 203 |
+
# signature.
|
| 204 |
+
s = inspect.signature(xp.unique)
|
| 205 |
+
if "equal_nan" in s.parameters:
|
| 206 |
+
return {"equal_nan": False}
|
| 207 |
+
return {}
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def unique_all(x: Array, /, xp: Namespace) -> UniqueAllResult:
    """Array-API ``unique_all`` implemented on top of ``xp.unique``."""
    extra = _unique_kwargs(xp)
    vals, idx, inv, cnt = xp.unique(
        x,
        return_counts=True,
        return_index=True,
        return_inverse=True,
        **extra,
    )
    # np.unique() flattens inverse indices, but they need to share x's shape.
    # See https://github.com/numpy/numpy/issues/20638
    return UniqueAllResult(vals, idx, inv.reshape(x.shape), cnt)
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def unique_counts(x: Array, /, xp: Namespace) -> UniqueCountsResult:
    """Array-API ``unique_counts`` implemented on top of ``xp.unique``."""
    extra = _unique_kwargs(xp)
    pair = xp.unique(
        x, return_counts=True, return_index=False, return_inverse=False, **extra
    )
    return UniqueCountsResult(*pair)
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def unique_inverse(x: Array, /, xp: Namespace) -> UniqueInverseResult:
    """Array-API ``unique_inverse`` implemented on top of ``xp.unique``."""
    extra = _unique_kwargs(xp)
    vals, inv = xp.unique(
        x,
        return_counts=False,
        return_index=False,
        return_inverse=True,
        **extra,
    )
    # xp.unique() flattens inverse indices, but they need to share x's shape.
    # See https://github.com/numpy/numpy/issues/20638
    return UniqueInverseResult(vals, inv.reshape(x.shape))
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def unique_values(x: Array, /, xp: Namespace) -> Array:
    """Array-API ``unique_values``: only the unique values of ``x``."""
    extra = _unique_kwargs(xp)
    return xp.unique(
        x,
        return_counts=False,
        return_index=False,
        return_inverse=False,
        **extra,
    )
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
# These functions have different keyword argument names
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def std(
    x: Array,
    /,
    xp: Namespace,
    *,
    axis: int | tuple[int, ...] | None = None,
    correction: float = 0.0,  # the array API calls ddof "correction"
    keepdims: bool = False,
    **kwargs: object,
) -> Array:
    """Standard deviation with the array-API ``correction`` keyword (maps to ``ddof``)."""
    return xp.std(x, axis=axis, keepdims=keepdims, ddof=correction, **kwargs)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def var(
    x: Array,
    /,
    xp: Namespace,
    *,
    axis: int | tuple[int, ...] | None = None,
    correction: float = 0.0,  # the array API calls ddof "correction"
    keepdims: bool = False,
    **kwargs: object,
) -> Array:
    """Variance with the array-API ``correction`` keyword (maps to ``ddof``)."""
    return xp.var(x, axis=axis, keepdims=keepdims, ddof=correction, **kwargs)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
# cumulative_sum is renamed from cumsum, and adds the include_initial keyword
|
| 295 |
+
# argument
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def cumulative_sum(
    x: Array,
    /,
    xp: Namespace,
    *,
    axis: int | None = None,
    dtype: DType | None = None,
    include_initial: bool = False,
    **kwargs: object,
) -> Array:
    """Array-API ``cumulative_sum`` on top of ``xp.cumsum`` (adds ``include_initial``)."""
    wrapped_xp = array_namespace(x)

    # TODO: The standard is not clear about what should happen when x.ndim == 0.
    if axis is None:
        if x.ndim > 1:
            raise ValueError(
                "axis must be specified in cumulative_sum for more than one dimension"
            )
        axis = 0

    out = xp.cumsum(x, axis=axis, dtype=dtype, **kwargs)

    if not include_initial:
        return out

    # np.cumsum does not support include_initial: prepend an explicit zero slice.
    lead_shape = list(x.shape)
    lead_shape[axis] = 1
    zeros_slice = wrapped_xp.zeros(
        shape=lead_shape, dtype=out.dtype, device=_get_device(out)
    )
    return xp.concatenate([zeros_slice, out], axis=axis)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def cumulative_prod(
    x: Array,
    /,
    xp: Namespace,
    *,
    axis: int | None = None,
    dtype: DType | None = None,
    include_initial: bool = False,
    **kwargs: object,
) -> Array:
    """Array-API ``cumulative_prod`` on top of ``xp.cumprod`` (adds ``include_initial``)."""
    wrapped_xp = array_namespace(x)

    if axis is None:
        if x.ndim > 1:
            raise ValueError(
                "axis must be specified in cumulative_prod for more than one dimension"
            )
        axis = 0

    out = xp.cumprod(x, axis=axis, dtype=dtype, **kwargs)

    if not include_initial:
        return out

    # np.cumprod does not support include_initial: prepend an explicit ones slice.
    lead_shape = list(x.shape)
    lead_shape[axis] = 1
    ones_slice = wrapped_xp.ones(
        shape=lead_shape, dtype=out.dtype, device=_get_device(out)
    )
    return xp.concatenate([ones_slice, out], axis=axis)
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
# The min and max argument names in clip are different and not optional in numpy, and type
|
| 374 |
+
# promotion behavior is different.
|
| 375 |
+
# The min and max argument names in clip are different and not optional in numpy, and type
# promotion behavior is different.
def clip(
    x: Array,
    /,
    min: float | Array | None = None,
    max: float | Array | None = None,
    *,
    xp: Namespace,
    # TODO: np.clip has other ufunc kwargs
    out: Array | None = None,
) -> Array:
    """Clip ``x`` to ``[min, max]`` with the array-API dtype contract.

    Unlike ``np.clip`` the output always has ``x``'s dtype: the bounds are
    cast to ``x.dtype`` before comparison, and clipping is done by masked
    in-place assignment into ``out``.  NaN bounds propagate (a NaN in the
    bound selects the bound at that position).
    """
    # Scalars (and None) have empty shape for broadcasting purposes.
    def _isscalar(a: object) -> TypeIs[float | None]:
        return isinstance(a, int | float) or a is None

    min_shape = () if _isscalar(min) else min.shape
    max_shape = () if _isscalar(max) else max.shape

    wrapped_xp = array_namespace(x)

    result_shape = xp.broadcast_shapes(x.shape, min_shape, max_shape)

    # np.clip does type promotion but the array API clip requires that the
    # output have the same dtype as x. We do this instead of just downcasting
    # the result of xp.clip() to handle some corner cases better (e.g.,
    # avoiding uint64 -> float64 promotion).

    # Note: cases where min or max overflow (integer) or round (float) in the
    # wrong direction when downcasting to x.dtype are unspecified. This code
    # just does whatever NumPy does when it downcasts in the assignment, but
    # other behavior could be preferred, especially for integers. For example,
    # this code produces:

    # >>> clip(asarray(0, dtype=int8), asarray(128, dtype=int16), None)
    # -128

    # but an answer of 0 might be preferred. See
    # https://github.com/numpy/numpy/issues/24976 for more discussion on this issue.

    # At least handle the case of Python integers correctly (see
    # https://github.com/numpy/numpy/pull/26892).
    if wrapped_xp.isdtype(x.dtype, "integral"):
        # A Python-int bound at or beyond the dtype's range cannot clip anything;
        # dropping it avoids an overflowing cast below.
        if type(min) is int and min <= wrapped_xp.iinfo(x.dtype).min:
            min = None
        if type(max) is int and max >= wrapped_xp.iinfo(x.dtype).max:
            max = None

    dev = _get_device(x)
    if out is None:
        out = wrapped_xp.empty(result_shape, dtype=x.dtype, device=dev)
    assert out is not None  # workaround for a type-narrowing issue in pyright
    # Broadcast x into the result buffer; clipping then mutates it in place.
    out[()] = x

    if min is not None:
        a = wrapped_xp.asarray(min, dtype=x.dtype, device=dev)
        a = xp.broadcast_to(a, result_shape)
        # Positions below the lower bound, or where the bound is NaN, take the bound.
        ia = (out < a) | xp.isnan(a)
        out[ia] = a[ia]

    if max is not None:
        b = wrapped_xp.asarray(max, dtype=x.dtype, device=dev)
        b = xp.broadcast_to(b, result_shape)
        # Positions above the upper bound, or where the bound is NaN, take the bound.
        ib = (out > b) | xp.isnan(b)
        out[ib] = b[ib]

    # Return a scalar for 0-D
    return out[()]
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
# Unlike transpose(), the axes argument to permute_dims() is required.
|
| 443 |
+
def permute_dims(x: Array, /, axes: tuple[int, ...], xp: Namespace) -> Array:
    """``transpose`` with a mandatory ``axes`` argument, per the array API."""
    permuted = xp.transpose(x, axes)
    return permuted
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
# np.reshape calls the keyword argument 'newshape' instead of 'shape'
|
| 448 |
+
def reshape(
|
| 449 |
+
x: Array,
|
| 450 |
+
/,
|
| 451 |
+
shape: tuple[int, ...],
|
| 452 |
+
xp: Namespace,
|
| 453 |
+
*,
|
| 454 |
+
copy: bool | None = None,
|
| 455 |
+
**kwargs: object,
|
| 456 |
+
) -> Array:
|
| 457 |
+
if copy is True:
|
| 458 |
+
x = x.copy()
|
| 459 |
+
elif copy is False:
|
| 460 |
+
y = x.view()
|
| 461 |
+
y.shape = shape
|
| 462 |
+
return y
|
| 463 |
+
return xp.reshape(x, shape, **kwargs)
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
# The descending keyword is new in sort and argsort, and 'kind' replaced with
|
| 467 |
+
# 'stable'
|
| 468 |
+
def argsort(
    x: Array,
    /,
    xp: Namespace,
    *,
    axis: int = -1,
    descending: bool = False,
    stable: bool = True,
    **kwargs: object,
) -> Array:
    """Array-API ``argsort`` with ``descending``/``stable`` keywords."""
    # numpy.sort defaults to kind='quicksort' while cupy.sort uses kind=None,
    # so only pass kind through when a stable sort was requested.
    if stable:
        kwargs["kind"] = "stable"
    if not descending:
        return xp.argsort(x, axis=axis, **kwargs)
    # NumPy has no native descending sort.  Sorting the flipped input and
    # flipping back keeps the relative order of equal elements; merely
    # reversing xp.argsort(x) would not.
    order = xp.flip(
        xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs),
        axis=axis,
    )
    # Rely on flip()/argsort() to validate axis.
    ax = axis if axis >= 0 else x.ndim + axis
    return x.shape[ax] - 1 - order
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
def sort(
    x: Array,
    /,
    xp: Namespace,
    *,
    axis: int = -1,
    descending: bool = False,
    stable: bool = True,
    **kwargs: object,
) -> Array:
    """Array-API ``sort`` with ``descending``/``stable`` keywords."""
    # numpy.sort defaults to kind='quicksort' while cupy.sort uses kind=None,
    # so only pass kind through when a stable sort was requested.
    if stable:
        kwargs["kind"] = "stable"
    out = xp.sort(x, axis=axis, **kwargs)
    return xp.flip(out, axis=axis) if descending else out
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
# nonzero should error for zero-dimensional arrays
|
| 522 |
+
def nonzero(x: Array, /, xp: Namespace, **kwargs: object) -> tuple[Array, ...]:
    """``nonzero`` that rejects 0-D input, as the array API requires."""
    if x.ndim == 0:
        raise ValueError("nonzero() does not support zero-dimensional arrays")
    return xp.nonzero(x, **kwargs)
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
# linear algebra functions
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
def matmul(x1: Array, x2: Array, /, xp: Namespace, **kwargs: object) -> Array:
    """Thin pass-through to ``xp.matmul``."""
    product = xp.matmul(x1, x2, **kwargs)
    return product
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
# Unlike transpose, matrix_transpose only transposes the last two axes.
|
| 536 |
+
def matrix_transpose(x: Array, /, xp: Namespace) -> Array:
    """Transpose only the last two axes (array-API ``matrix_transpose``)."""
    if x.ndim < 2:
        raise ValueError("x must be at least 2-dimensional for matrix_transpose")
    return xp.swapaxes(x, -1, -2)
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
def tensordot(
    x1: Array,
    x2: Array,
    /,
    xp: Namespace,
    *,
    axes: int | tuple[Sequence[int], Sequence[int]] = 2,
    **kwargs: object,
) -> Array:
    """Thin pass-through to ``xp.tensordot`` with keyword-only ``axes``."""
    contracted = xp.tensordot(x1, x2, axes=axes, **kwargs)
    return contracted
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
def vecdot(x1: Array, x2: Array, /, xp: Namespace, *, axis: int = -1) -> Array:
    """Vector dot product along ``axis``, conjugating ``x1`` (array-API ``vecdot``)."""
    if x1.shape[axis] != x2.shape[axis]:
        raise ValueError("x1 and x2 must have the same size along the given axis")

    # torch names it broadcast_tensors; numpy et al. use broadcast_arrays.
    if hasattr(xp, "broadcast_tensors"):
        broadcast = xp.broadcast_tensors
    else:
        broadcast = xp.broadcast_arrays

    a = xp.moveaxis(x1, axis, -1)
    b = xp.moveaxis(x2, axis, -1)
    a, b = broadcast(a, b)

    # Reduce via matmul of a (..., 1, n) row with a (..., n, 1) column.
    stacked = xp.conj(a[..., None, :]) @ b[..., None]
    return stacked[..., 0, 0]
|
| 569 |
+
|
| 570 |
+
|
| 571 |
+
# isdtype is a new function in the 2022.12 array API specification.
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
def isdtype(
    dtype: DType,
    kind: DType | str | tuple[DType | str, ...],
    xp: Namespace,
    *,
    _tuple: bool = True,  # Disallow nested tuples
) -> bool:
    """
    Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.

    Note that outside of this function, this compat library does not yet fully
    support complex numbers.

    See
    https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
    for more details
    """
    if _tuple and isinstance(kind, tuple):
        members = cast("tuple[DType | str, ...]", kind)
        return any(isdtype(dtype, k, xp, _tuple=False) for k in members)
    if isinstance(kind, str):
        if kind == "bool":
            return dtype == xp.bool_
        # Each string kind maps onto an abstract numpy scalar superclass.
        abstract = {
            "signed integer": "signedinteger",
            "unsigned integer": "unsignedinteger",
            "integral": "integer",
            "real floating": "floating",
            "complex floating": "complexfloating",
            "numeric": "number",
        }.get(kind)
        if abstract is None:
            raise ValueError(f"Unrecognized data type kind: {kind!r}")
        return xp.issubdtype(dtype, getattr(xp, abstract))
    # This will allow things that aren't required by the spec, like
    # isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be
    # more strict here to match the type annotation? Note that the
    # array_api_strict implementation will be very strict.
    return dtype == kind
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
# unstack is a new function in the 2023.12 array API standard
|
| 622 |
+
def unstack(x: Array, /, xp: Namespace, *, axis: int = 0) -> tuple[Array, ...]:
    """Split ``x`` into a tuple of slices along ``axis`` (2023.12 ``unstack``)."""
    if x.ndim == 0:
        raise ValueError("Input array must be at least 1-d.")
    # Moving the target axis to the front lets plain iteration do the split.
    return tuple(xp.moveaxis(x, axis, 0))
|
| 626 |
+
|
| 627 |
+
|
| 628 |
+
# numpy 1.26 does not use the standard definition for sign on complex numbers
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def sign(x: Array, /, xp: Namespace, **kwargs: object) -> Array:
    """``sign`` with the standard's complex definition and CuPy NaN propagation."""
    if isdtype(x.dtype, "complex floating", xp=xp):
        # The standard defines complex sign as x / |x| (numpy 1.26 does not).
        out = (x / xp.abs(x, **kwargs))[...]
        # sign(0) = 0 but the formula above would give nan.
        out[x == 0j] = 0j
        return out[()]
    out = xp.sign(x, **kwargs)
    # CuPy sign() does not propagate nans. See
    # https://github.com/data-apis/array-api-compat/issues/136
    if is_cupy_namespace(xp) and isdtype(x.dtype, "real floating", xp=xp):
        out[xp.isnan(x)] = xp.nan
    return out[()]
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
def finfo(type_: DType | Array, /, xp: Namespace) -> Any:
    """``finfo`` accepting either a dtype or an array (falls back to ``.dtype``)."""
    # It is surprisingly difficult to recognize a dtype apart from an array:
    # np.int64 is not the same as np.asarray(1).dtype!  So just try, and on
    # failure retry with the object's .dtype attribute.
    try:
        return xp.finfo(type_)
    except (ValueError, TypeError):
        return xp.finfo(type_.dtype)
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def iinfo(type_: DType | Array, /, xp: Namespace) -> Any:
    """``iinfo`` accepting either a dtype or an array (falls back to ``.dtype``)."""
    try:
        return xp.iinfo(type_)
    except (ValueError, TypeError):
        return xp.iinfo(type_.dtype)
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
# Public API of this module: array-creation helpers, the unique_* result
# types and functions, statistics, and the renamed/re-signatured wrappers
# defined above.
__all__ = [
    "arange",
    "empty",
    "empty_like",
    "eye",
    "full",
    "full_like",
    "linspace",
    "ones",
    "ones_like",
    "zeros",
    "zeros_like",
    "UniqueAllResult",
    "UniqueCountsResult",
    "UniqueInverseResult",
    "unique_all",
    "unique_counts",
    "unique_inverse",
    "unique_values",
    "std",
    "var",
    "cumulative_sum",
    "cumulative_prod",
    "clip",
    "permute_dims",
    "reshape",
    "argsort",
    "sort",
    "nonzero",
    "matmul",
    "matrix_transpose",
    "tensordot",
    "vecdot",
    "isdtype",
    "unstack",
    "sign",
    "finfo",
    "iinfo",
]

def __dir__() -> list[str]:
    # Restrict dir(module) to the declared public API.
    return __all__
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_fft.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections.abc import Sequence
|
| 4 |
+
from typing import Literal, TypeAlias
|
| 5 |
+
|
| 6 |
+
from ._typing import Array, Device, DType, Namespace
|
| 7 |
+
|
| 8 |
+
_Norm: TypeAlias = Literal["backward", "ortho", "forward"]
|
| 9 |
+
|
| 10 |
+
# Note: NumPy fft functions improperly upcast float32 and complex64 to
|
| 11 |
+
# complex128, which is why we require wrapping them all here.
|
| 12 |
+
|
| 13 |
+
def fft(
    x: Array,
    /,
    xp: Namespace,
    *,
    n: int | None = None,
    axis: int = -1,
    norm: _Norm = "backward",
) -> Array:
    """``fft`` wrapper that keeps single-precision inputs single precision."""
    out = xp.fft.fft(x, n=n, axis=axis, norm=norm)
    # NumPy upcasts float32/complex64 results to complex128; undo that.
    return out.astype(xp.complex64) if x.dtype in (xp.float32, xp.complex64) else out
|
| 26 |
+
|
| 27 |
+
def ifft(
    x: Array,
    /,
    xp: Namespace,
    *,
    n: int | None = None,
    axis: int = -1,
    norm: _Norm = "backward",
) -> Array:
    """``ifft`` wrapper that keeps single-precision inputs single precision."""
    out = xp.fft.ifft(x, n=n, axis=axis, norm=norm)
    # NumPy upcasts float32/complex64 results to complex128; undo that.
    return out.astype(xp.complex64) if x.dtype in (xp.float32, xp.complex64) else out
|
| 40 |
+
|
| 41 |
+
def fftn(
    x: Array,
    /,
    xp: Namespace,
    *,
    s: Sequence[int] | None = None,
    axes: Sequence[int] | None = None,
    norm: _Norm = "backward",
) -> Array:
    """``fftn`` wrapper that keeps single-precision inputs single precision."""
    out = xp.fft.fftn(x, s=s, axes=axes, norm=norm)
    # NumPy upcasts float32/complex64 results to complex128; undo that.
    return out.astype(xp.complex64) if x.dtype in (xp.float32, xp.complex64) else out
|
| 54 |
+
|
| 55 |
+
def ifftn(
    x: Array,
    /,
    xp: Namespace,
    *,
    s: Sequence[int] | None = None,
    axes: Sequence[int] | None = None,
    norm: _Norm = "backward",
) -> Array:
    """``ifftn`` wrapper that keeps single-precision inputs single precision."""
    out = xp.fft.ifftn(x, s=s, axes=axes, norm=norm)
    # NumPy upcasts float32/complex64 results to complex128; undo that.
    return out.astype(xp.complex64) if x.dtype in (xp.float32, xp.complex64) else out
|
| 68 |
+
|
| 69 |
+
def rfft(
    x: Array,
    /,
    xp: Namespace,
    *,
    n: int | None = None,
    axis: int = -1,
    norm: _Norm = "backward",
) -> Array:
    """``rfft`` wrapper that keeps float32 inputs single precision."""
    out = xp.fft.rfft(x, n=n, axis=axis, norm=norm)
    # NumPy upcasts the float32 result to complex128; undo that.
    return out.astype(xp.complex64) if x.dtype == xp.float32 else out
|
| 82 |
+
|
| 83 |
+
def irfft(
    x: Array,
    /,
    xp: Namespace,
    *,
    n: int | None = None,
    axis: int = -1,
    norm: _Norm = "backward",
) -> Array:
    """``irfft`` wrapper that keeps complex64 inputs single precision."""
    out = xp.fft.irfft(x, n=n, axis=axis, norm=norm)
    # NumPy upcasts the complex64 result to float64; undo that.
    return out.astype(xp.float32) if x.dtype == xp.complex64 else out
|
| 96 |
+
|
| 97 |
+
def rfftn(
    x: Array,
    /,
    xp: Namespace,
    *,
    s: Sequence[int] | None = None,
    axes: Sequence[int] | None = None,
    norm: _Norm = "backward",
) -> Array:
    """``rfftn`` wrapper that keeps float32 inputs single precision."""
    out = xp.fft.rfftn(x, s=s, axes=axes, norm=norm)
    # NumPy upcasts the float32 result to complex128; undo that.
    return out.astype(xp.complex64) if x.dtype == xp.float32 else out
|
| 110 |
+
|
| 111 |
+
def irfftn(
    x: Array,
    /,
    xp: Namespace,
    *,
    s: Sequence[int] | None = None,
    axes: Sequence[int] | None = None,
    norm: _Norm = "backward",
) -> Array:
    """``irfftn`` wrapper that keeps complex64 inputs single precision."""
    out = xp.fft.irfftn(x, s=s, axes=axes, norm=norm)
    # NumPy upcasts the complex64 result to float64; undo that.
    return out.astype(xp.float32) if x.dtype == xp.complex64 else out
|
| 124 |
+
|
| 125 |
+
def hfft(
    x: Array,
    /,
    xp: Namespace,
    *,
    n: int | None = None,
    axis: int = -1,
    norm: _Norm = "backward",
) -> Array:
    """``hfft`` wrapper that keeps single-precision inputs single precision."""
    out = xp.fft.hfft(x, n=n, axis=axis, norm=norm)
    # NumPy upcasts float32/complex64 results to float64; undo that.
    return out.astype(xp.float32) if x.dtype in (xp.float32, xp.complex64) else out
|
| 138 |
+
|
| 139 |
+
def ihfft(
    x: Array,
    /,
    xp: Namespace,
    *,
    n: int | None = None,
    axis: int = -1,
    norm: _Norm = "backward",
) -> Array:
    """``ihfft`` wrapper that keeps single-precision inputs single precision."""
    out = xp.fft.ihfft(x, n=n, axis=axis, norm=norm)
    # NumPy upcasts float32/complex64 results to complex128; undo that.
    return out.astype(xp.complex64) if x.dtype in (xp.float32, xp.complex64) else out
|
| 152 |
+
|
| 153 |
+
def fftfreq(
    n: int,
    /,
    xp: Namespace,
    *,
    d: float = 1.0,
    dtype: DType | None = None,
    device: Device | None = None,
) -> Array:
    """``fftfreq`` with array-API ``dtype``/``device`` keywords (CPU only)."""
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    freqs = xp.fft.fftfreq(n, d=d)
    return freqs if dtype is None else freqs.astype(dtype)
|
| 168 |
+
|
| 169 |
+
def rfftfreq(
    n: int,
    /,
    xp: Namespace,
    *,
    d: float = 1.0,
    dtype: DType | None = None,
    device: Device | None = None,
) -> Array:
    """``rfftfreq`` with array-API ``dtype``/``device`` keywords (CPU only)."""
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    freqs = xp.fft.rfftfreq(n, d=d)
    return freqs if dtype is None else freqs.astype(dtype)
|
| 184 |
+
|
| 185 |
+
def fftshift(
    x: Array, /, xp: Namespace, *, axes: int | Sequence[int] | None = None
) -> Array:
    """Thin pass-through to ``xp.fft.fftshift``."""
    shifted = xp.fft.fftshift(x, axes=axes)
    return shifted
|
| 189 |
+
|
| 190 |
+
def ifftshift(
    x: Array, /, xp: Namespace, *, axes: int | Sequence[int] | None = None
) -> Array:
    """Thin pass-through to ``xp.fft.ifftshift`` (inverse of ``fftshift``)."""
    unshifted = xp.fft.ifftshift(x, axes=axes)
    return unshifted
|
| 194 |
+
|
| 195 |
+
# Public API of this module: single-precision-preserving fft wrappers plus
# the frequency/shift helpers.
__all__ = [
    "fft",
    "ifft",
    "fftn",
    "ifftn",
    "rfft",
    "irfft",
    "rfftn",
    "irfftn",
    "hfft",
    "ihfft",
    "fftfreq",
    "rfftfreq",
    "fftshift",
    "ifftshift",
]

def __dir__() -> list[str]:
    # Restrict dir(module) to the declared public API.
    return __all__
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_helpers.py
ADDED
|
@@ -0,0 +1,1079 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Various helper functions which are not part of the spec.
|
| 3 |
+
|
| 4 |
+
Functions which start with an underscore are for internal use only but helpers
|
| 5 |
+
that are in __all__ are intended as additional helper functions for use by end
|
| 6 |
+
users of the compat library.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import enum
|
| 12 |
+
import inspect
|
| 13 |
+
import math
|
| 14 |
+
import sys
|
| 15 |
+
import warnings
|
| 16 |
+
from collections.abc import Collection, Hashable
|
| 17 |
+
from functools import lru_cache
|
| 18 |
+
from typing import (
|
| 19 |
+
TYPE_CHECKING,
|
| 20 |
+
Any,
|
| 21 |
+
Final,
|
| 22 |
+
Literal,
|
| 23 |
+
SupportsIndex,
|
| 24 |
+
TypeAlias,
|
| 25 |
+
TypeGuard,
|
| 26 |
+
cast,
|
| 27 |
+
overload,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
from ._typing import Array, Device, HasShape, Namespace, SupportsArrayNamespace
|
| 31 |
+
|
| 32 |
+
if TYPE_CHECKING:
|
| 33 |
+
import cupy as cp
|
| 34 |
+
import dask.array as da
|
| 35 |
+
import jax
|
| 36 |
+
import ndonnx as ndx
|
| 37 |
+
import numpy as np
|
| 38 |
+
import numpy.typing as npt
|
| 39 |
+
import sparse
|
| 40 |
+
import torch
|
| 41 |
+
|
| 42 |
+
# TODO: import from typing (requires Python >=3.13)
|
| 43 |
+
from typing_extensions import TypeIs
|
| 44 |
+
|
| 45 |
+
_ZeroGradientArray: TypeAlias = npt.NDArray[np.void]
|
| 46 |
+
|
| 47 |
+
_ArrayApiObj: TypeAlias = (
|
| 48 |
+
npt.NDArray[Any]
|
| 49 |
+
| cp.ndarray
|
| 50 |
+
| da.Array
|
| 51 |
+
| jax.Array
|
| 52 |
+
| ndx.Array
|
| 53 |
+
| sparse.SparseArray
|
| 54 |
+
| torch.Tensor
|
| 55 |
+
| SupportsArrayNamespace[Any]
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
_API_VERSIONS_OLD: Final = frozenset({"2021.12", "2022.12", "2023.12"})
|
| 59 |
+
_API_VERSIONS: Final = _API_VERSIONS_OLD | frozenset({"2024.12"})
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@lru_cache(100)
|
| 63 |
+
def _issubclass_fast(cls: type, modname: str, clsname: str) -> bool:
|
| 64 |
+
try:
|
| 65 |
+
mod = sys.modules[modname]
|
| 66 |
+
except KeyError:
|
| 67 |
+
return False
|
| 68 |
+
parent_cls = getattr(mod, clsname)
|
| 69 |
+
return issubclass(cls, parent_cls)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _is_jax_zero_gradient_array(x: object) -> TypeGuard[_ZeroGradientArray]:
|
| 73 |
+
"""Return True if `x` is a zero-gradient array.
|
| 74 |
+
|
| 75 |
+
These arrays are a design quirk of Jax that may one day be removed.
|
| 76 |
+
See https://github.com/google/jax/issues/20620.
|
| 77 |
+
"""
|
| 78 |
+
# Fast exit
|
| 79 |
+
try:
|
| 80 |
+
dtype = x.dtype # type: ignore[attr-defined]
|
| 81 |
+
except AttributeError:
|
| 82 |
+
return False
|
| 83 |
+
cls = cast(Hashable, type(dtype))
|
| 84 |
+
if not _issubclass_fast(cls, "numpy.dtypes", "VoidDType"):
|
| 85 |
+
return False
|
| 86 |
+
|
| 87 |
+
if "jax" not in sys.modules:
|
| 88 |
+
return False
|
| 89 |
+
|
| 90 |
+
import jax
|
| 91 |
+
# jax.float0 is a np.dtype([('float0', 'V')])
|
| 92 |
+
return dtype == jax.float0
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def is_numpy_array(x: object) -> TypeIs[npt.NDArray[Any]]:
|
| 96 |
+
"""
|
| 97 |
+
Return True if `x` is a NumPy array.
|
| 98 |
+
|
| 99 |
+
This function does not import NumPy if it has not already been imported
|
| 100 |
+
and is therefore cheap to use.
|
| 101 |
+
|
| 102 |
+
This also returns True for `ndarray` subclasses and NumPy scalar objects.
|
| 103 |
+
|
| 104 |
+
See Also
|
| 105 |
+
--------
|
| 106 |
+
|
| 107 |
+
array_namespace
|
| 108 |
+
is_array_api_obj
|
| 109 |
+
is_cupy_array
|
| 110 |
+
is_torch_array
|
| 111 |
+
is_ndonnx_array
|
| 112 |
+
is_dask_array
|
| 113 |
+
is_jax_array
|
| 114 |
+
is_pydata_sparse_array
|
| 115 |
+
"""
|
| 116 |
+
# TODO: Should we reject ndarray subclasses?
|
| 117 |
+
cls = cast(Hashable, type(x))
|
| 118 |
+
return (
|
| 119 |
+
_issubclass_fast(cls, "numpy", "ndarray")
|
| 120 |
+
or _issubclass_fast(cls, "numpy", "generic")
|
| 121 |
+
) and not _is_jax_zero_gradient_array(x)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def is_cupy_array(x: object) -> bool:
|
| 125 |
+
"""
|
| 126 |
+
Return True if `x` is a CuPy array.
|
| 127 |
+
|
| 128 |
+
This function does not import CuPy if it has not already been imported
|
| 129 |
+
and is therefore cheap to use.
|
| 130 |
+
|
| 131 |
+
This also returns True for `cupy.ndarray` subclasses and CuPy scalar objects.
|
| 132 |
+
|
| 133 |
+
See Also
|
| 134 |
+
--------
|
| 135 |
+
|
| 136 |
+
array_namespace
|
| 137 |
+
is_array_api_obj
|
| 138 |
+
is_numpy_array
|
| 139 |
+
is_torch_array
|
| 140 |
+
is_ndonnx_array
|
| 141 |
+
is_dask_array
|
| 142 |
+
is_jax_array
|
| 143 |
+
is_pydata_sparse_array
|
| 144 |
+
"""
|
| 145 |
+
cls = cast(Hashable, type(x))
|
| 146 |
+
return _issubclass_fast(cls, "cupy", "ndarray")
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def is_torch_array(x: object) -> TypeIs[torch.Tensor]:
|
| 150 |
+
"""
|
| 151 |
+
Return True if `x` is a PyTorch tensor.
|
| 152 |
+
|
| 153 |
+
This function does not import PyTorch if it has not already been imported
|
| 154 |
+
and is therefore cheap to use.
|
| 155 |
+
|
| 156 |
+
See Also
|
| 157 |
+
--------
|
| 158 |
+
|
| 159 |
+
array_namespace
|
| 160 |
+
is_array_api_obj
|
| 161 |
+
is_numpy_array
|
| 162 |
+
is_cupy_array
|
| 163 |
+
is_dask_array
|
| 164 |
+
is_jax_array
|
| 165 |
+
is_pydata_sparse_array
|
| 166 |
+
"""
|
| 167 |
+
cls = cast(Hashable, type(x))
|
| 168 |
+
return _issubclass_fast(cls, "torch", "Tensor")
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def is_ndonnx_array(x: object) -> TypeIs[ndx.Array]:
|
| 172 |
+
"""
|
| 173 |
+
Return True if `x` is a ndonnx Array.
|
| 174 |
+
|
| 175 |
+
This function does not import ndonnx if it has not already been imported
|
| 176 |
+
and is therefore cheap to use.
|
| 177 |
+
|
| 178 |
+
See Also
|
| 179 |
+
--------
|
| 180 |
+
|
| 181 |
+
array_namespace
|
| 182 |
+
is_array_api_obj
|
| 183 |
+
is_numpy_array
|
| 184 |
+
is_cupy_array
|
| 185 |
+
is_ndonnx_array
|
| 186 |
+
is_dask_array
|
| 187 |
+
is_jax_array
|
| 188 |
+
is_pydata_sparse_array
|
| 189 |
+
"""
|
| 190 |
+
cls = cast(Hashable, type(x))
|
| 191 |
+
return _issubclass_fast(cls, "ndonnx", "Array")
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def is_dask_array(x: object) -> TypeIs[da.Array]:
|
| 195 |
+
"""
|
| 196 |
+
Return True if `x` is a dask.array Array.
|
| 197 |
+
|
| 198 |
+
This function does not import dask if it has not already been imported
|
| 199 |
+
and is therefore cheap to use.
|
| 200 |
+
|
| 201 |
+
See Also
|
| 202 |
+
--------
|
| 203 |
+
|
| 204 |
+
array_namespace
|
| 205 |
+
is_array_api_obj
|
| 206 |
+
is_numpy_array
|
| 207 |
+
is_cupy_array
|
| 208 |
+
is_torch_array
|
| 209 |
+
is_ndonnx_array
|
| 210 |
+
is_jax_array
|
| 211 |
+
is_pydata_sparse_array
|
| 212 |
+
"""
|
| 213 |
+
cls = cast(Hashable, type(x))
|
| 214 |
+
return _issubclass_fast(cls, "dask.array", "Array")
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def is_jax_array(x: object) -> TypeIs[jax.Array]:
|
| 218 |
+
"""
|
| 219 |
+
Return True if `x` is a JAX array.
|
| 220 |
+
|
| 221 |
+
This function does not import JAX if it has not already been imported
|
| 222 |
+
and is therefore cheap to use.
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
See Also
|
| 226 |
+
--------
|
| 227 |
+
|
| 228 |
+
array_namespace
|
| 229 |
+
is_array_api_obj
|
| 230 |
+
is_numpy_array
|
| 231 |
+
is_cupy_array
|
| 232 |
+
is_torch_array
|
| 233 |
+
is_ndonnx_array
|
| 234 |
+
is_dask_array
|
| 235 |
+
is_pydata_sparse_array
|
| 236 |
+
"""
|
| 237 |
+
cls = cast(Hashable, type(x))
|
| 238 |
+
# We test for jax.core.Tracer here to identify jax arrays during jit tracing. From jax 0.8.2 on,
|
| 239 |
+
# tracers are not a subclass of jax.Array anymore. Note that tracers can also represent
|
| 240 |
+
# non-array values and a fully correct implementation would need to use isinstance checks. Since
|
| 241 |
+
# we use hash-based caching with type names as keys, we cannot use instance checks without
|
| 242 |
+
# losing performance here. For more information, see
|
| 243 |
+
# https://github.com/data-apis/array-api-compat/pull/369 and the corresponding issue.
|
| 244 |
+
return (
|
| 245 |
+
_issubclass_fast(cls, "jax", "Array")
|
| 246 |
+
or _issubclass_fast(cls, "jax.core", "Tracer")
|
| 247 |
+
or _is_jax_zero_gradient_array(x)
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def is_pydata_sparse_array(x: object) -> TypeIs[sparse.SparseArray]:
|
| 252 |
+
"""
|
| 253 |
+
Return True if `x` is an array from the `sparse` package.
|
| 254 |
+
|
| 255 |
+
This function does not import `sparse` if it has not already been imported
|
| 256 |
+
and is therefore cheap to use.
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
See Also
|
| 260 |
+
--------
|
| 261 |
+
|
| 262 |
+
array_namespace
|
| 263 |
+
is_array_api_obj
|
| 264 |
+
is_numpy_array
|
| 265 |
+
is_cupy_array
|
| 266 |
+
is_torch_array
|
| 267 |
+
is_ndonnx_array
|
| 268 |
+
is_dask_array
|
| 269 |
+
is_jax_array
|
| 270 |
+
"""
|
| 271 |
+
# TODO: Account for other backends.
|
| 272 |
+
cls = cast(Hashable, type(x))
|
| 273 |
+
return _issubclass_fast(cls, "sparse", "SparseArray")
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def is_array_api_obj(x: object) -> TypeGuard[_ArrayApiObj]:
|
| 277 |
+
"""
|
| 278 |
+
Return True if `x` is an array API compatible array object.
|
| 279 |
+
|
| 280 |
+
See Also
|
| 281 |
+
--------
|
| 282 |
+
|
| 283 |
+
array_namespace
|
| 284 |
+
is_numpy_array
|
| 285 |
+
is_cupy_array
|
| 286 |
+
is_torch_array
|
| 287 |
+
is_ndonnx_array
|
| 288 |
+
is_dask_array
|
| 289 |
+
is_jax_array
|
| 290 |
+
"""
|
| 291 |
+
return (
|
| 292 |
+
hasattr(x, '__array_namespace__')
|
| 293 |
+
or _is_array_api_cls(cast(Hashable, type(x)))
|
| 294 |
+
)
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
@lru_cache(100)
|
| 298 |
+
def _is_array_api_cls(cls: type) -> bool:
|
| 299 |
+
return (
|
| 300 |
+
# TODO: drop support for numpy<2 which didn't have __array_namespace__
|
| 301 |
+
_issubclass_fast(cls, "numpy", "ndarray")
|
| 302 |
+
or _issubclass_fast(cls, "numpy", "generic")
|
| 303 |
+
or _issubclass_fast(cls, "cupy", "ndarray")
|
| 304 |
+
or _issubclass_fast(cls, "torch", "Tensor")
|
| 305 |
+
or _issubclass_fast(cls, "dask.array", "Array")
|
| 306 |
+
or _issubclass_fast(cls, "sparse", "SparseArray")
|
| 307 |
+
# TODO: drop support for jax<0.4.32 which didn't have __array_namespace__
|
| 308 |
+
or _issubclass_fast(cls, "jax", "Array")
|
| 309 |
+
or _issubclass_fast(cls, "jax.core", "Tracer") # see is_jax_array for limitations
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def _compat_module_name() -> str:
|
| 314 |
+
assert __name__.endswith(".common._helpers")
|
| 315 |
+
return __name__.removesuffix(".common._helpers")
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
@lru_cache(100)
|
| 319 |
+
def is_numpy_namespace(xp: Namespace) -> bool:
|
| 320 |
+
"""
|
| 321 |
+
Returns True if `xp` is a NumPy namespace.
|
| 322 |
+
|
| 323 |
+
This includes both NumPy itself and the version wrapped by array-api-compat.
|
| 324 |
+
|
| 325 |
+
See Also
|
| 326 |
+
--------
|
| 327 |
+
|
| 328 |
+
array_namespace
|
| 329 |
+
is_cupy_namespace
|
| 330 |
+
is_torch_namespace
|
| 331 |
+
is_ndonnx_namespace
|
| 332 |
+
is_dask_namespace
|
| 333 |
+
is_jax_namespace
|
| 334 |
+
is_pydata_sparse_namespace
|
| 335 |
+
is_array_api_strict_namespace
|
| 336 |
+
"""
|
| 337 |
+
return xp.__name__ in {"numpy", _compat_module_name() + ".numpy"}
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
@lru_cache(100)
|
| 341 |
+
def is_cupy_namespace(xp: Namespace) -> bool:
|
| 342 |
+
"""
|
| 343 |
+
Returns True if `xp` is a CuPy namespace.
|
| 344 |
+
|
| 345 |
+
This includes both CuPy itself and the version wrapped by array-api-compat.
|
| 346 |
+
|
| 347 |
+
See Also
|
| 348 |
+
--------
|
| 349 |
+
|
| 350 |
+
array_namespace
|
| 351 |
+
is_numpy_namespace
|
| 352 |
+
is_torch_namespace
|
| 353 |
+
is_ndonnx_namespace
|
| 354 |
+
is_dask_namespace
|
| 355 |
+
is_jax_namespace
|
| 356 |
+
is_pydata_sparse_namespace
|
| 357 |
+
is_array_api_strict_namespace
|
| 358 |
+
"""
|
| 359 |
+
return xp.__name__ in {"cupy", _compat_module_name() + ".cupy"}
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
@lru_cache(100)
|
| 363 |
+
def is_torch_namespace(xp: Namespace) -> bool:
|
| 364 |
+
"""
|
| 365 |
+
Returns True if `xp` is a PyTorch namespace.
|
| 366 |
+
|
| 367 |
+
This includes both PyTorch itself and the version wrapped by array-api-compat.
|
| 368 |
+
|
| 369 |
+
See Also
|
| 370 |
+
--------
|
| 371 |
+
|
| 372 |
+
array_namespace
|
| 373 |
+
is_numpy_namespace
|
| 374 |
+
is_cupy_namespace
|
| 375 |
+
is_ndonnx_namespace
|
| 376 |
+
is_dask_namespace
|
| 377 |
+
is_jax_namespace
|
| 378 |
+
is_pydata_sparse_namespace
|
| 379 |
+
is_array_api_strict_namespace
|
| 380 |
+
"""
|
| 381 |
+
return xp.__name__ in {"torch", _compat_module_name() + ".torch"}
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
def is_ndonnx_namespace(xp: Namespace) -> bool:
|
| 385 |
+
"""
|
| 386 |
+
Returns True if `xp` is an NDONNX namespace.
|
| 387 |
+
|
| 388 |
+
See Also
|
| 389 |
+
--------
|
| 390 |
+
|
| 391 |
+
array_namespace
|
| 392 |
+
is_numpy_namespace
|
| 393 |
+
is_cupy_namespace
|
| 394 |
+
is_torch_namespace
|
| 395 |
+
is_dask_namespace
|
| 396 |
+
is_jax_namespace
|
| 397 |
+
is_pydata_sparse_namespace
|
| 398 |
+
is_array_api_strict_namespace
|
| 399 |
+
"""
|
| 400 |
+
return xp.__name__ == "ndonnx"
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
@lru_cache(100)
|
| 404 |
+
def is_dask_namespace(xp: Namespace) -> bool:
|
| 405 |
+
"""
|
| 406 |
+
Returns True if `xp` is a Dask namespace.
|
| 407 |
+
|
| 408 |
+
This includes both ``dask.array`` itself and the version wrapped by array-api-compat.
|
| 409 |
+
|
| 410 |
+
See Also
|
| 411 |
+
--------
|
| 412 |
+
|
| 413 |
+
array_namespace
|
| 414 |
+
is_numpy_namespace
|
| 415 |
+
is_cupy_namespace
|
| 416 |
+
is_torch_namespace
|
| 417 |
+
is_ndonnx_namespace
|
| 418 |
+
is_jax_namespace
|
| 419 |
+
is_pydata_sparse_namespace
|
| 420 |
+
is_array_api_strict_namespace
|
| 421 |
+
"""
|
| 422 |
+
return xp.__name__ in {"dask.array", _compat_module_name() + ".dask.array"}
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
def is_jax_namespace(xp: Namespace) -> bool:
|
| 426 |
+
"""
|
| 427 |
+
Returns True if `xp` is a JAX namespace.
|
| 428 |
+
|
| 429 |
+
This includes ``jax.numpy`` and ``jax.experimental.array_api`` which existed in
|
| 430 |
+
older versions of JAX.
|
| 431 |
+
|
| 432 |
+
See Also
|
| 433 |
+
--------
|
| 434 |
+
|
| 435 |
+
array_namespace
|
| 436 |
+
is_numpy_namespace
|
| 437 |
+
is_cupy_namespace
|
| 438 |
+
is_torch_namespace
|
| 439 |
+
is_ndonnx_namespace
|
| 440 |
+
is_dask_namespace
|
| 441 |
+
is_pydata_sparse_namespace
|
| 442 |
+
is_array_api_strict_namespace
|
| 443 |
+
"""
|
| 444 |
+
return xp.__name__ in {"jax.numpy", "jax.experimental.array_api"}
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
def is_pydata_sparse_namespace(xp: Namespace) -> bool:
|
| 448 |
+
"""
|
| 449 |
+
Returns True if `xp` is a pydata/sparse namespace.
|
| 450 |
+
|
| 451 |
+
See Also
|
| 452 |
+
--------
|
| 453 |
+
|
| 454 |
+
array_namespace
|
| 455 |
+
is_numpy_namespace
|
| 456 |
+
is_cupy_namespace
|
| 457 |
+
is_torch_namespace
|
| 458 |
+
is_ndonnx_namespace
|
| 459 |
+
is_dask_namespace
|
| 460 |
+
is_jax_namespace
|
| 461 |
+
is_array_api_strict_namespace
|
| 462 |
+
"""
|
| 463 |
+
return xp.__name__ == "sparse"
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def is_array_api_strict_namespace(xp: Namespace) -> bool:
|
| 467 |
+
"""
|
| 468 |
+
Returns True if `xp` is an array-api-strict namespace.
|
| 469 |
+
|
| 470 |
+
See Also
|
| 471 |
+
--------
|
| 472 |
+
|
| 473 |
+
array_namespace
|
| 474 |
+
is_numpy_namespace
|
| 475 |
+
is_cupy_namespace
|
| 476 |
+
is_torch_namespace
|
| 477 |
+
is_ndonnx_namespace
|
| 478 |
+
is_dask_namespace
|
| 479 |
+
is_jax_namespace
|
| 480 |
+
is_pydata_sparse_namespace
|
| 481 |
+
"""
|
| 482 |
+
return xp.__name__ == "array_api_strict"
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def _check_api_version(api_version: str | None) -> None:
|
| 486 |
+
if api_version in _API_VERSIONS_OLD:
|
| 487 |
+
warnings.warn(
|
| 488 |
+
f"The {api_version} version of the array API specification was requested but the returned namespace is actually version 2024.12"
|
| 489 |
+
)
|
| 490 |
+
elif api_version is not None and api_version not in _API_VERSIONS:
|
| 491 |
+
raise ValueError(
|
| 492 |
+
"Only the 2024.12 version of the array API specification is currently supported"
|
| 493 |
+
)
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
class _ClsToXPInfo(enum.Enum):
|
| 497 |
+
SCALAR = 0
|
| 498 |
+
MAYBE_JAX_ZERO_GRADIENT = 1
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
@lru_cache(100)
|
| 502 |
+
def _cls_to_namespace(
|
| 503 |
+
cls: type,
|
| 504 |
+
api_version: str | None,
|
| 505 |
+
use_compat: bool | None,
|
| 506 |
+
) -> tuple[Namespace | None, _ClsToXPInfo | None]:
|
| 507 |
+
if use_compat not in (None, True, False):
|
| 508 |
+
raise ValueError("use_compat must be None, True, or False")
|
| 509 |
+
_use_compat = use_compat in (None, True)
|
| 510 |
+
cls_ = cast(Hashable, cls) # Make mypy happy
|
| 511 |
+
|
| 512 |
+
if (
|
| 513 |
+
_issubclass_fast(cls_, "numpy", "ndarray")
|
| 514 |
+
or _issubclass_fast(cls_, "numpy", "generic")
|
| 515 |
+
):
|
| 516 |
+
if use_compat is True:
|
| 517 |
+
_check_api_version(api_version)
|
| 518 |
+
from .. import numpy as xp
|
| 519 |
+
elif use_compat is False:
|
| 520 |
+
import numpy as xp # type: ignore[no-redef]
|
| 521 |
+
else:
|
| 522 |
+
# NumPy 2.0+ have __array_namespace__; however they are not
|
| 523 |
+
# yet fully array API compatible.
|
| 524 |
+
from .. import numpy as xp # type: ignore[no-redef]
|
| 525 |
+
return xp, _ClsToXPInfo.MAYBE_JAX_ZERO_GRADIENT
|
| 526 |
+
|
| 527 |
+
# Note: this must happen _after_ the test for np.generic,
|
| 528 |
+
# because np.float64 and np.complex128 are subclasses of float and complex.
|
| 529 |
+
if issubclass(cls, int | float | complex | type(None)):
|
| 530 |
+
return None, _ClsToXPInfo.SCALAR
|
| 531 |
+
|
| 532 |
+
if _issubclass_fast(cls_, "cupy", "ndarray"):
|
| 533 |
+
if _use_compat:
|
| 534 |
+
_check_api_version(api_version)
|
| 535 |
+
from .. import cupy as xp # type: ignore[no-redef]
|
| 536 |
+
else:
|
| 537 |
+
import cupy as xp # type: ignore[no-redef]
|
| 538 |
+
return xp, None
|
| 539 |
+
|
| 540 |
+
if _issubclass_fast(cls_, "torch", "Tensor"):
|
| 541 |
+
if _use_compat:
|
| 542 |
+
_check_api_version(api_version)
|
| 543 |
+
from .. import torch as xp # type: ignore[no-redef]
|
| 544 |
+
else:
|
| 545 |
+
import torch as xp # type: ignore[no-redef]
|
| 546 |
+
return xp, None
|
| 547 |
+
|
| 548 |
+
if _issubclass_fast(cls_, "dask.array", "Array"):
|
| 549 |
+
if _use_compat:
|
| 550 |
+
_check_api_version(api_version)
|
| 551 |
+
from ..dask import array as xp # type: ignore[no-redef]
|
| 552 |
+
else:
|
| 553 |
+
import dask.array as xp # type: ignore[no-redef]
|
| 554 |
+
return xp, None
|
| 555 |
+
|
| 556 |
+
# Backwards compatibility for jax<0.4.32
|
| 557 |
+
if _issubclass_fast(cls_, "jax", "Array"):
|
| 558 |
+
return _jax_namespace(api_version, use_compat), None
|
| 559 |
+
|
| 560 |
+
return None, None
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
def _jax_namespace(api_version: str | None, use_compat: bool | None) -> Namespace:
|
| 564 |
+
if use_compat:
|
| 565 |
+
raise ValueError("JAX does not have an array-api-compat wrapper")
|
| 566 |
+
import jax.numpy as jnp
|
| 567 |
+
if not hasattr(jnp, "__array_namespace_info__"):
|
| 568 |
+
# JAX v0.4.32 and newer implements the array API directly in jax.numpy.
|
| 569 |
+
# For older JAX versions, it is available via jax.experimental.array_api.
|
| 570 |
+
# jnp.Array objects gain the __array_namespace__ method.
|
| 571 |
+
import jax.experimental.array_api # noqa: F401
|
| 572 |
+
# Test api_version
|
| 573 |
+
return jnp.empty(0).__array_namespace__(api_version=api_version)
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
def array_namespace(
|
| 577 |
+
*xs: Array | complex | None,
|
| 578 |
+
api_version: str | None = None,
|
| 579 |
+
use_compat: bool | None = None,
|
| 580 |
+
) -> Namespace:
|
| 581 |
+
"""
|
| 582 |
+
Get the array API compatible namespace for the arrays `xs`.
|
| 583 |
+
|
| 584 |
+
Parameters
|
| 585 |
+
----------
|
| 586 |
+
xs: arrays
|
| 587 |
+
one or more arrays. xs can also be Python scalars (bool, int, float,
|
| 588 |
+
complex, or None), which are ignored.
|
| 589 |
+
|
| 590 |
+
api_version: str
|
| 591 |
+
The newest version of the spec that you need support for (currently
|
| 592 |
+
the compat library wrapped APIs support v2024.12).
|
| 593 |
+
|
| 594 |
+
use_compat: bool or None
|
| 595 |
+
If None (the default), the native namespace will be returned if it is
|
| 596 |
+
already array API compatible, otherwise a compat wrapper is used. If
|
| 597 |
+
True, the compat library wrapped library will be returned. If False,
|
| 598 |
+
the native library namespace is returned.
|
| 599 |
+
|
| 600 |
+
Returns
|
| 601 |
+
-------
|
| 602 |
+
|
| 603 |
+
out: namespace
|
| 604 |
+
The array API compatible namespace corresponding to the arrays in `xs`.
|
| 605 |
+
|
| 606 |
+
Raises
|
| 607 |
+
------
|
| 608 |
+
TypeError
|
| 609 |
+
If `xs` contains arrays from different array libraries or contains a
|
| 610 |
+
non-array.
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
Typical usage is to pass the arguments of a function to
|
| 614 |
+
`array_namespace()` at the top of a function to get the corresponding
|
| 615 |
+
array API namespace:
|
| 616 |
+
|
| 617 |
+
.. code:: python
|
| 618 |
+
|
| 619 |
+
def your_function(x, y):
|
| 620 |
+
xp = array_api_compat.array_namespace(x, y)
|
| 621 |
+
# Now use xp as the array library namespace
|
| 622 |
+
return xp.mean(x, axis=0) + 2*xp.std(y, axis=0)
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
Wrapped array namespaces can also be imported directly. For example,
|
| 626 |
+
`array_namespace(np.array(...))` will return `array_api_compat.numpy`.
|
| 627 |
+
This function will also work for any array library not wrapped by
|
| 628 |
+
array-api-compat if it explicitly defines `__array_namespace__
|
| 629 |
+
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__array_namespace__.html>`__
|
| 630 |
+
(the wrapped namespace is always preferred if it exists).
|
| 631 |
+
|
| 632 |
+
See Also
|
| 633 |
+
--------
|
| 634 |
+
|
| 635 |
+
is_array_api_obj
|
| 636 |
+
is_numpy_array
|
| 637 |
+
is_cupy_array
|
| 638 |
+
is_torch_array
|
| 639 |
+
is_dask_array
|
| 640 |
+
is_jax_array
|
| 641 |
+
is_pydata_sparse_array
|
| 642 |
+
|
| 643 |
+
"""
|
| 644 |
+
namespaces: set[Namespace] = set()
|
| 645 |
+
for x in xs:
|
| 646 |
+
xp, info = _cls_to_namespace(cast(Hashable, type(x)), api_version, use_compat)
|
| 647 |
+
if info is _ClsToXPInfo.SCALAR:
|
| 648 |
+
continue
|
| 649 |
+
|
| 650 |
+
if (
|
| 651 |
+
info is _ClsToXPInfo.MAYBE_JAX_ZERO_GRADIENT
|
| 652 |
+
and _is_jax_zero_gradient_array(x)
|
| 653 |
+
):
|
| 654 |
+
xp = _jax_namespace(api_version, use_compat)
|
| 655 |
+
|
| 656 |
+
if xp is None:
|
| 657 |
+
get_ns = getattr(x, "__array_namespace__", None)
|
| 658 |
+
if get_ns is None:
|
| 659 |
+
raise TypeError(f"{type(x).__name__} is not a supported array type")
|
| 660 |
+
if use_compat:
|
| 661 |
+
raise ValueError(
|
| 662 |
+
"The given array does not have an array-api-compat wrapper"
|
| 663 |
+
)
|
| 664 |
+
xp = get_ns(api_version=api_version)
|
| 665 |
+
|
| 666 |
+
namespaces.add(xp)
|
| 667 |
+
|
| 668 |
+
try:
|
| 669 |
+
(xp,) = namespaces
|
| 670 |
+
return xp
|
| 671 |
+
except ValueError:
|
| 672 |
+
if not namespaces:
|
| 673 |
+
raise TypeError(
|
| 674 |
+
"array_namespace requires at least one non-scalar array input"
|
| 675 |
+
)
|
| 676 |
+
raise TypeError(f"Multiple namespaces for array inputs: {namespaces}")
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
# backwards compatibility alias
|
| 680 |
+
get_namespace = array_namespace
|
| 681 |
+
|
| 682 |
+
|
| 683 |
+
def _check_device(bare_xp: Namespace, device: Device) -> None: # pyright: ignore[reportUnusedFunction]
|
| 684 |
+
"""
|
| 685 |
+
Validate dummy device on device-less array backends.
|
| 686 |
+
|
| 687 |
+
Notes
|
| 688 |
+
-----
|
| 689 |
+
This function is also invoked by CuPy, which does have multiple devices
|
| 690 |
+
if there are multiple GPUs available.
|
| 691 |
+
However, CuPy multi-device support is currently impossible
|
| 692 |
+
without using the global device or a context manager:
|
| 693 |
+
|
| 694 |
+
https://github.com/data-apis/array-api-compat/pull/293
|
| 695 |
+
"""
|
| 696 |
+
if bare_xp is sys.modules.get("numpy"):
|
| 697 |
+
if device not in ("cpu", None):
|
| 698 |
+
raise ValueError(f"Unsupported device for NumPy: {device!r}")
|
| 699 |
+
|
| 700 |
+
elif bare_xp is sys.modules.get("dask.array"):
|
| 701 |
+
if device not in ("cpu", _DASK_DEVICE, None):
|
| 702 |
+
raise ValueError(f"Unsupported device for Dask: {device!r}")
|
| 703 |
+
|
| 704 |
+
|
| 705 |
+
# Placeholder object to represent the dask device
|
| 706 |
+
# when the array backend is not the CPU.
|
| 707 |
+
# (since it is not easy to tell which device a dask array is on)
|
| 708 |
+
class _dask_device:
|
| 709 |
+
def __repr__(self) -> Literal["DASK_DEVICE"]:
|
| 710 |
+
return "DASK_DEVICE"
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
_DASK_DEVICE = _dask_device()
|
| 714 |
+
|
| 715 |
+
|
| 716 |
+
# device() is not on numpy.ndarray or dask.array and to_device() is not on numpy.ndarray
|
| 717 |
+
# or cupy.ndarray. They are not included in array objects of this library
|
| 718 |
+
# because this library just reuses the respective ndarray classes without
|
| 719 |
+
# wrapping or subclassing them. These helper functions can be used instead of
|
| 720 |
+
# the wrapper functions for libraries that need to support both NumPy/CuPy and
|
| 721 |
+
# other libraries that use devices.
|
| 722 |
+
def device(x: _ArrayApiObj, /) -> Device:
|
| 723 |
+
"""
|
| 724 |
+
Hardware device the array data resides on.
|
| 725 |
+
|
| 726 |
+
This is equivalent to `x.device` according to the `standard
|
| 727 |
+
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html>`__.
|
| 728 |
+
This helper is included because some array libraries either do not have
|
| 729 |
+
the `device` attribute or include it with an incompatible API.
|
| 730 |
+
|
| 731 |
+
Parameters
|
| 732 |
+
----------
|
| 733 |
+
x: array
|
| 734 |
+
array instance from an array API compatible library.
|
| 735 |
+
|
| 736 |
+
Returns
|
| 737 |
+
-------
|
| 738 |
+
out: device
|
| 739 |
+
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
|
| 740 |
+
section of the array API specification).
|
| 741 |
+
|
| 742 |
+
Notes
|
| 743 |
+
-----
|
| 744 |
+
|
| 745 |
+
For NumPy the device is always `"cpu"`. For Dask, the device is always a
|
| 746 |
+
special `DASK_DEVICE` object.
|
| 747 |
+
|
| 748 |
+
See Also
|
| 749 |
+
--------
|
| 750 |
+
|
| 751 |
+
to_device : Move array data to a different device.
|
| 752 |
+
|
| 753 |
+
"""
|
| 754 |
+
if is_numpy_array(x):
|
| 755 |
+
return "cpu"
|
| 756 |
+
elif is_dask_array(x):
|
| 757 |
+
# Peek at the metadata of the Dask array to determine type
|
| 758 |
+
if is_numpy_array(x._meta):
|
| 759 |
+
# Must be on CPU since backed by numpy
|
| 760 |
+
return "cpu"
|
| 761 |
+
return _DASK_DEVICE
|
| 762 |
+
elif is_jax_array(x):
|
| 763 |
+
# FIXME Jitted JAX arrays do not have a device attribute
|
| 764 |
+
# https://github.com/jax-ml/jax/issues/26000
|
| 765 |
+
# Return None in this case. Note that this workaround breaks
|
| 766 |
+
# the standard and will result in new arrays being created on the
|
| 767 |
+
# default device instead of the same device as the input array(s).
|
| 768 |
+
x_device = getattr(x, "device", None)
|
| 769 |
+
# Older JAX releases had .device() as a method, which has been replaced
|
| 770 |
+
# with a property in accordance with the standard.
|
| 771 |
+
if inspect.ismethod(x_device):
|
| 772 |
+
return x_device()
|
| 773 |
+
else:
|
| 774 |
+
return x_device
|
| 775 |
+
elif is_pydata_sparse_array(x):
|
| 776 |
+
# `sparse` will gain `.device`, so check for this first.
|
| 777 |
+
x_device = getattr(x, "device", None)
|
| 778 |
+
if x_device is not None:
|
| 779 |
+
return x_device
|
| 780 |
+
# Everything but DOK has this attr.
|
| 781 |
+
try:
|
| 782 |
+
inner = x.data # pyright: ignore
|
| 783 |
+
except AttributeError:
|
| 784 |
+
return "cpu"
|
| 785 |
+
# Return the device of the constituent array
|
| 786 |
+
return device(inner) # pyright: ignore
|
| 787 |
+
return x.device # type: ignore # pyright: ignore
|
| 788 |
+
|
| 789 |
+
|
| 790 |
+
# Prevent shadowing, used below
|
| 791 |
+
_device = device
|
| 792 |
+
|
| 793 |
+
|
| 794 |
+
# Based on cupy.array_api.Array.to_device
|
| 795 |
+
def _cupy_to_device(
|
| 796 |
+
x: cp.ndarray,
|
| 797 |
+
device: Device,
|
| 798 |
+
/,
|
| 799 |
+
stream: int | Any | None = None,
|
| 800 |
+
) -> cp.ndarray:
|
| 801 |
+
import cupy as cp
|
| 802 |
+
|
| 803 |
+
if device == "cpu":
|
| 804 |
+
# allowing us to use `to_device(x, "cpu")`
|
| 805 |
+
# is useful for portable test swapping between
|
| 806 |
+
# host and device backends
|
| 807 |
+
return x.get()
|
| 808 |
+
if not isinstance(device, cp.cuda.Device):
|
| 809 |
+
raise TypeError(f"Unsupported device type {device!r}")
|
| 810 |
+
|
| 811 |
+
if stream is None:
|
| 812 |
+
with device:
|
| 813 |
+
return cp.asarray(x)
|
| 814 |
+
|
| 815 |
+
# stream can be an int as specified in __dlpack__, or a CuPy stream
|
| 816 |
+
if isinstance(stream, int):
|
| 817 |
+
stream = cp.cuda.ExternalStream(stream)
|
| 818 |
+
elif not isinstance(stream, cp.cuda.Stream):
|
| 819 |
+
raise TypeError(f"Unsupported stream type {stream!r}")
|
| 820 |
+
|
| 821 |
+
with device, stream:
|
| 822 |
+
return cp.asarray(x)
|
| 823 |
+
|
| 824 |
+
|
| 825 |
+
def _torch_to_device(
|
| 826 |
+
x: torch.Tensor,
|
| 827 |
+
device: torch.device | str | int,
|
| 828 |
+
/,
|
| 829 |
+
stream: int | Any | None = None,
|
| 830 |
+
) -> torch.Tensor:
|
| 831 |
+
if stream is not None:
|
| 832 |
+
raise NotImplementedError
|
| 833 |
+
return x.to(device)
|
| 834 |
+
|
| 835 |
+
|
| 836 |
+
def to_device(x: Array, device: Device, /, *, stream: int | Any | None = None) -> Array:
|
| 837 |
+
"""
|
| 838 |
+
Copy the array from the device on which it currently resides to the specified ``device``.
|
| 839 |
+
|
| 840 |
+
This is equivalent to `x.to_device(device, stream=stream)` according to
|
| 841 |
+
the `standard
|
| 842 |
+
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html>`__.
|
| 843 |
+
This helper is included because some array libraries do not have the
|
| 844 |
+
`to_device` method.
|
| 845 |
+
|
| 846 |
+
Parameters
|
| 847 |
+
----------
|
| 848 |
+
|
| 849 |
+
x: array
|
| 850 |
+
array instance from an array API compatible library.
|
| 851 |
+
|
| 852 |
+
device: device
|
| 853 |
+
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
|
| 854 |
+
section of the array API specification).
|
| 855 |
+
|
| 856 |
+
stream: int | Any | None
|
| 857 |
+
stream object to use during copy. In addition to the types supported
|
| 858 |
+
in ``array.__dlpack__``, implementations may choose to support any
|
| 859 |
+
library-specific stream object with the caveat that any code using
|
| 860 |
+
such an object would not be portable.
|
| 861 |
+
|
| 862 |
+
Returns
|
| 863 |
+
-------
|
| 864 |
+
|
| 865 |
+
out: array
|
| 866 |
+
an array with the same data and data type as ``x`` and located on the
|
| 867 |
+
specified ``device``.
|
| 868 |
+
|
| 869 |
+
Notes
|
| 870 |
+
-----
|
| 871 |
+
|
| 872 |
+
For NumPy, this function effectively does nothing since the only supported
|
| 873 |
+
device is the CPU. For CuPy, this method supports CuPy CUDA
|
| 874 |
+
:external+cupy:class:`Device <cupy.cuda.Device>` and
|
| 875 |
+
:external+cupy:class:`Stream <cupy.cuda.Stream>` objects. For PyTorch,
|
| 876 |
+
this is the same as :external+torch:meth:`x.to(device) <torch.Tensor.to>`
|
| 877 |
+
(the ``stream`` argument is not supported in PyTorch).
|
| 878 |
+
|
| 879 |
+
See Also
|
| 880 |
+
--------
|
| 881 |
+
|
| 882 |
+
device : Hardware device the array data resides on.
|
| 883 |
+
|
| 884 |
+
"""
|
| 885 |
+
if is_numpy_array(x):
|
| 886 |
+
if stream is not None:
|
| 887 |
+
raise ValueError("The stream argument to to_device() is not supported")
|
| 888 |
+
if device == "cpu":
|
| 889 |
+
return x
|
| 890 |
+
raise ValueError(f"Unsupported device {device!r}")
|
| 891 |
+
elif is_cupy_array(x):
|
| 892 |
+
# cupy does not yet have to_device
|
| 893 |
+
return _cupy_to_device(x, device, stream=stream)
|
| 894 |
+
elif is_torch_array(x):
|
| 895 |
+
return _torch_to_device(x, device, stream=stream)
|
| 896 |
+
elif is_dask_array(x):
|
| 897 |
+
if stream is not None:
|
| 898 |
+
raise ValueError("The stream argument to to_device() is not supported")
|
| 899 |
+
# TODO: What if our array is on the GPU already?
|
| 900 |
+
if device == "cpu":
|
| 901 |
+
return x
|
| 902 |
+
raise ValueError(f"Unsupported device {device!r}")
|
| 903 |
+
elif is_jax_array(x):
|
| 904 |
+
if not hasattr(x, "__array_namespace__"):
|
| 905 |
+
# In JAX v0.4.31 and older, this import adds to_device method to x...
|
| 906 |
+
import jax.experimental.array_api # noqa: F401 # pyright: ignore
|
| 907 |
+
|
| 908 |
+
# ... but only on eager JAX. It won't work inside jax.jit.
|
| 909 |
+
if not hasattr(x, "to_device"):
|
| 910 |
+
return x
|
| 911 |
+
return x.to_device(device, stream=stream)
|
| 912 |
+
elif is_pydata_sparse_array(x) and device == _device(x):
|
| 913 |
+
# Perform trivial check to return the same array if
|
| 914 |
+
# device is same instead of err-ing.
|
| 915 |
+
return x
|
| 916 |
+
return x.to_device(device, stream=stream) # pyright: ignore
|
| 917 |
+
|
| 918 |
+
|
| 919 |
+
@overload
def size(x: HasShape[Collection[SupportsIndex]]) -> int: ...
@overload
def size(x: HasShape[Collection[SupportsIndex | None]]) -> int | None: ...
def size(x: HasShape[Collection[SupportsIndex | None]]) -> int | None:
    """
    Return the total number of elements of x.

    This is equivalent to `x.size` according to the `standard
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html>`__.

    This helper is included because PyTorch defines `size` in an
    :external+torch:meth:`incompatible way <torch.Tensor.size>`.
    It also fixes dask.array's behaviour which returns nan for unknown sizes, whereas
    the standard requires None.
    """
    shape = x.shape
    # Lazy API compliant arrays, such as ndonnx, can contain None in their shape
    if None in shape:
        return None
    total = math.prod(cast("Collection[SupportsIndex]", shape))
    # dask.array.Array.shape can contain NaN; the standard requires None there.
    return None if math.isnan(total) else total
|
| 941 |
+
|
| 942 |
+
|
| 943 |
+
@lru_cache(100)
def _is_writeable_cls(cls: type) -> bool | None:
    """Tri-state, per-class writeability check.

    Returns False for classes known to be read-only, True for known Array API
    classes, and None when the class is not recognized.
    """
    # Array classes whose instances never support __setitem__.
    readonly_classes = (
        ("numpy", "generic"),
        ("jax", "Array"),
        ("jax.core", "Tracer"),  # see is_jax_array for limitations
        ("sparse", "SparseArray"),
    )
    if any(_issubclass_fast(cls, mod, name) for mod, name in readonly_classes):
        return False
    return True if _is_array_api_cls(cls) else None
|
| 955 |
+
|
| 956 |
+
|
| 957 |
+
def is_writeable_array(x: object) -> TypeGuard[_ArrayApiObj]:
    """
    Return False if ``x.__setitem__`` is expected to raise; True otherwise.
    Return False if `x` is not an array API compatible object.

    Warning
    -------
    As there is no standard way to check if an array is writeable without actually
    writing to it, this function blindly returns True for all unknown array types.
    """
    # type(x) is always hashable; the cast lets it be used as an lru_cache key.
    cls = cast(Hashable, type(x))
    # numpy is special-cased: writeability is a per-instance flag, so it cannot
    # be answered by the per-class cached check below.
    if _issubclass_fast(cls, "numpy", "ndarray"):
        return cast("npt.NDArray", x).flags.writeable
    res = _is_writeable_cls(cls)
    if res is not None:
        return res
    # Unknown class: assume writeable iff it is an Array API object at all.
    return hasattr(x, '__array_namespace__')
|
| 974 |
+
|
| 975 |
+
|
| 976 |
+
@lru_cache(100)
def _is_lazy_cls(cls: type) -> bool | None:
    """Tri-state, per-class laziness check.

    Returns False for known eager array classes, True for known lazy ones,
    and None when the class is not recognized.
    """
    eager_classes = (
        ("numpy", "ndarray"),
        ("numpy", "generic"),
        ("cupy", "ndarray"),
        ("torch", "Tensor"),
        ("sparse", "SparseArray"),
    )
    if any(_issubclass_fast(cls, mod, name) for mod, name in eager_classes):
        return False
    lazy_classes = (
        ("jax", "Array"),
        ("jax.core", "Tracer"),  # see is_jax_array for limitations
        ("dask.array", "Array"),
        ("ndonnx", "Array"),
    )
    if any(_issubclass_fast(cls, mod, name) for mod, name in lazy_classes):
        return True
    return None
|
| 994 |
+
|
| 995 |
+
|
| 996 |
+
def is_lazy_array(x: object) -> TypeGuard[_ArrayApiObj]:
    """Return True if x is potentially a future or it may be otherwise impossible or
    expensive to eagerly read its contents, regardless of their size, e.g. by
    calling ``bool(x)`` or ``float(x)``.

    Return False otherwise; e.g. ``bool(x)`` etc. is guaranteed to succeed and to be
    cheap as long as the array has the right dtype and size.

    Note
    ----
    This function errs on the side of caution for array types that may or may not be
    lazy, e.g. JAX arrays, by always returning True for them.
    """
    # **JAX note:** while it is possible to determine if you're inside or outside
    # jax.jit by testing the subclass of a jax.Array object, as well as testing bool()
    # as we do below for unknown arrays, this is not recommended by JAX best practices.

    # **Dask note:** Dask eagerly computes the graph on __bool__, __float__, and so on.
    # This behaviour, while impossible to change without breaking backwards
    # compatibility, is highly detrimental to performance as the whole graph will end
    # up being computed multiple times.

    # Note: skipping reclassification of JAX zero gradient arrays, as one will
    # exclusively get them once they leave a jax.grad JIT context.
    cls = cast(Hashable, type(x))
    # Fast path: known eager/lazy classes are answered from the cached lookup.
    res = _is_lazy_cls(cls)
    if res is not None:
        return res

    # Not an Array API object at all -> not a lazy array.
    if not hasattr(x, "__array_namespace__"):
        return False

    # Unknown Array API compatible object. Note that this test may have dire consequences
    # in terms of performance, e.g. for a lazy object that eagerly computes the graph
    # on __bool__ (dask is one such example, which however is special-cased above).

    # Select a single point of the array
    s = size(cast("HasShape[Collection[SupportsIndex | None]]", x))
    if s is None:
        # Unknown size (e.g. data-dependent shape) implies laziness.
        return True
    xp = array_namespace(x)
    if s > 1:
        x = xp.reshape(x, (-1,))[0]
    # Cast to dtype=bool and deal with size 0 arrays
    x = xp.any(x)

    try:
        bool(x)
        return False
    # The Array API standard dictates that __bool__ should raise TypeError if the
    # output cannot be defined.
    # Here we allow for it to raise arbitrary exceptions, e.g. like Dask does.
    except Exception:
        return True
|
| 1050 |
+
|
| 1051 |
+
|
| 1052 |
+
# Public API of this helpers module.
__all__ = [
    "array_namespace",
    "device",
    "get_namespace",
    "is_array_api_obj",
    "is_array_api_strict_namespace",
    "is_cupy_array",
    "is_cupy_namespace",
    "is_dask_array",
    "is_dask_namespace",
    "is_jax_array",
    "is_jax_namespace",
    "is_numpy_array",
    "is_numpy_namespace",
    "is_torch_array",
    "is_torch_namespace",
    "is_ndonnx_array",
    "is_ndonnx_namespace",
    "is_pydata_sparse_array",
    "is_pydata_sparse_namespace",
    "is_writeable_array",
    "is_lazy_array",
    "size",
    "to_device",
]

def __dir__() -> list[str]:
    # Restrict dir() of the module to the public API.
    return __all__
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_linalg.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
from typing import Literal, NamedTuple, cast
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
# numpy >= 2 moved normalize_axis_tuple to numpy.lib.array_utils.
# Compare the parsed major version rather than the first character of the
# version string: the original `np.__version__[0] == "2"` check would silently
# take the wrong branch for any future major version (e.g. "3.0" or "10.0").
if int(np.__version__.split(".")[0]) >= 2:
    from numpy.lib.array_utils import normalize_axis_tuple
else:
    from numpy.core.numeric import normalize_axis_tuple  # type: ignore[no-redef]
|
| 12 |
+
|
| 13 |
+
from .._internal import get_xp
|
| 14 |
+
from ._aliases import isdtype, matmul, matrix_transpose, tensordot, vecdot
|
| 15 |
+
from ._typing import Array, DType, JustFloat, JustInt, Namespace
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# These are in the main NumPy namespace but not in numpy.linalg
|
| 19 |
+
def cross(
    x1: Array,
    x2: Array,
    /,
    xp: Namespace,
    *,
    axis: int = -1,
    **kwargs: object,
) -> Array:
    """Cross product of vectors along ``axis``, delegating to ``xp.cross``.

    In the array API, ``cross`` lives in the ``linalg`` extension, whereas
    numpy-style namespaces expose it at the top level.
    """
    result = xp.cross(x1, x2, axis=axis, **kwargs)
    return result
|
| 29 |
+
|
| 30 |
+
def outer(x1: Array, x2: Array, /, xp: Namespace, **kwargs: object) -> Array:
    """Outer product of two vectors, delegating to ``xp.outer``."""
    product = xp.outer(x1, x2, **kwargs)
    return product
|
| 32 |
+
|
| 33 |
+
class EighResult(NamedTuple):
    """Return type of ``eigh``: eigenvalues and eigenvectors."""
    eigenvalues: Array
    eigenvectors: Array


class QRResult(NamedTuple):
    """Return type of ``qr``: the Q and R factors."""
    Q: Array
    R: Array


class SlogdetResult(NamedTuple):
    """Return type of ``slogdet``: sign and log of the absolute determinant."""
    sign: Array
    logabsdet: Array


class SVDResult(NamedTuple):
    """Return type of ``svd``: U, singular values S, and Vh."""
    U: Array
    S: Array
    Vh: Array


# Thin wrappers over the namespace's linalg functions that repackage the
# plain-tuple results into the named tuples above.
def eigh(x: Array, /, xp: Namespace, **kwargs: object) -> EighResult:
    """Eigendecomposition of a Hermitian matrix, returned as an ``EighResult``."""
    result = xp.linalg.eigh(x, **kwargs)
    return EighResult(*result)


def qr(
    x: Array,
    /,
    xp: Namespace,
    *,
    mode: Literal["reduced", "complete"] = "reduced",
    **kwargs: object,
) -> QRResult:
    """QR factorization, returned as a ``QRResult``."""
    result = xp.linalg.qr(x, mode=mode, **kwargs)
    return QRResult(*result)


def slogdet(x: Array, /, xp: Namespace, **kwargs: object) -> SlogdetResult:
    """Sign and natural log of the absolute determinant, as a ``SlogdetResult``."""
    result = xp.linalg.slogdet(x, **kwargs)
    return SlogdetResult(*result)


def svd(
    x: Array,
    /,
    xp: Namespace,
    *,
    full_matrices: bool = True,
    **kwargs: object,
) -> SVDResult:
    """Singular value decomposition, returned as an ``SVDResult``."""
    result = xp.linalg.svd(x, full_matrices=full_matrices, **kwargs)
    return SVDResult(*result)
|
| 77 |
+
|
| 78 |
+
# These functions have additional keyword arguments
|
| 79 |
+
|
| 80 |
+
# The upper keyword argument is new from NumPy
|
| 81 |
+
def cholesky(
    x: Array,
    /,
    xp: Namespace,
    *,
    upper: bool = False,
    **kwargs: object,
) -> Array:
    """Cholesky decomposition of ``x``.

    Delegates to ``xp.linalg.cholesky`` and, when ``upper=True``, returns the
    (conjugate, for complex dtypes) transpose of the factor instead.
    The ``upper`` keyword argument is new relative to NumPy's signature.
    """
    L = xp.linalg.cholesky(x, **kwargs)
    if upper:
        # Transpose the last two axes via the compat matrix_transpose wrapper.
        U = get_xp(xp)(matrix_transpose)(L)
        if get_xp(xp)(isdtype)(U.dtype, 'complex floating'):
            # Hermitian transpose for complex inputs.
            U = xp.conj(U)  # pyright: ignore[reportConstantRedefinition]
        return U
    return L
|
| 96 |
+
|
| 97 |
+
# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy.
|
| 98 |
+
# Note that it has a different semantic meaning from tol and rcond.
|
| 99 |
+
def matrix_rank(
    x: Array,
    /,
    xp: Namespace,
    *,
    rtol: float | Array | None = None,
    **kwargs: object,
) -> Array:
    """Rank of a (stack of) matrices: the number of singular values above a
    tolerance.

    Note: ``rtol`` has different semantics from numpy's ``tol``/``rcond`` —
    it is relative to the largest singular value.
    """
    # this is different from xp.linalg.matrix_rank, which supports 1
    # dimensional arrays.
    if x.ndim < 2:
        raise xp.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional")
    S: Array = get_xp(xp)(svdvals)(x, **kwargs)
    if rtol is None:
        # Default tolerance: largest singular value * max(M, N) * dtype epsilon.
        tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * xp.finfo(S.dtype).eps
    else:
        # this is different from xp.linalg.matrix_rank, which does not
        # multiply the tolerance by the largest singular value.
        tol = S.max(axis=-1, keepdims=True)*xp.asarray(rtol)[..., xp.newaxis]
    return xp.count_nonzero(S > tol, axis=-1)
|
| 119 |
+
|
| 120 |
+
def pinv(
    x: Array,
    /,
    xp: Namespace,
    *,
    rtol: float | Array | None = None,
    **kwargs: object,
) -> Array:
    """Moore-Penrose pseudo-inverse of ``x``.

    Unlike ``xp.linalg.pinv``, the default tolerance is multiplied by
    ``max(M, N)``. Note that ``rtol`` has different semantics from numpy's
    ``rcond``.
    """
    tolerance = max(x.shape[-2:]) * xp.finfo(x.dtype).eps if rtol is None else rtol
    return xp.linalg.pinv(x, rcond=tolerance, **kwargs)
|
| 133 |
+
|
| 134 |
+
# These functions are new in the array API spec
|
| 135 |
+
|
| 136 |
+
def matrix_norm(
    x: Array,
    /,
    xp: Namespace,
    *,
    keepdims: bool = False,
    ord: Literal[1, 2, -1, -2] | JustFloat | Literal["fro", "nuc"] | None = "fro",
) -> Array:
    """Matrix norm computed over the last two axes (Frobenius by default)."""
    # Pinning axis=(-2, -1) makes xp.linalg.norm treat x as a (stack of)
    # matrices regardless of its rank.
    norm = xp.linalg.norm(x, ord=ord, axis=(-2, -1), keepdims=keepdims)
    return norm
|
| 145 |
+
|
| 146 |
+
# svdvals is not in NumPy (but it is in SciPy). It is equivalent to
|
| 147 |
+
# xp.linalg.svd(compute_uv=False).
|
| 148 |
+
def svdvals(x: Array, /, xp: Namespace) -> Array | tuple[Array, ...]:
    """Singular values of ``x``, i.e. ``svd`` with ``compute_uv=False``.

    Not present in NumPy's ``linalg`` (it is in SciPy's).
    """
    values = xp.linalg.svd(x, compute_uv=False)
    return values
|
| 150 |
+
|
| 151 |
+
def vector_norm(
    x: Array,
    /,
    xp: Namespace,
    *,
    axis: int | tuple[int, ...] | None = None,
    keepdims: bool = False,
    ord: JustInt | JustFloat = 2,
) -> Array:
    """Vector (p-)norm of ``x`` along ``axis``.

    Works around ``xp.linalg.norm`` switching to a matrix norm for 2-D inputs
    or 2-tuple axes, and adds support for reducing over multiple axes.
    """
    # xp.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or
    # when axis=None and the input is 2-D, so to force a vector norm, we make
    # it so the input is 1-D (for axis=None), or reshape so that norm is done
    # on a single dimension.
    if axis is None:
        # Note: xp.linalg.norm() doesn't handle 0-D arrays
        _x = x.ravel()
        _axis = 0
    elif isinstance(axis, tuple):
        # Note: The axis argument supports any number of axes, whereas
        # xp.linalg.norm() only supports a single axis for vector norm.
        normalized_axis = cast(
            "tuple[int, ...]",
            normalize_axis_tuple(axis, x.ndim),  # pyright: ignore[reportCallIssue]
        )
        rest = tuple(i for i in range(x.ndim) if i not in normalized_axis)
        newshape = axis + rest
        # Move the reduced axes to the front, then collapse them into a single
        # leading dimension so a one-axis vector norm can be applied.
        _x = xp.transpose(x, newshape).reshape(
            (math.prod([x.shape[i] for i in axis]), *[x.shape[i] for i in rest]))
        _axis = 0
    else:
        _x = x
        _axis = axis

    res = xp.linalg.norm(_x, axis=_axis, ord=ord)

    if keepdims:
        # We can't reuse xp.linalg.norm(keepdims) because of the reshape hacks
        # above to avoid matrix norm logic.
        shape = list(x.shape)
        axes = cast(
            "tuple[int, ...]",
            normalize_axis_tuple(  # pyright: ignore[reportCallIssue]
                range(x.ndim) if axis is None else axis,
                x.ndim,
            ),
        )
        for i in axes:
            shape[i] = 1
        res = xp.reshape(res, tuple(shape))

    return res
|
| 202 |
+
|
| 203 |
+
# xp.diagonal and xp.trace operate on the first two axes whereas these
|
| 204 |
+
# operates on the last two
|
| 205 |
+
|
| 206 |
+
def diagonal(x: Array, /, xp: Namespace, *, offset: int = 0, **kwargs: object) -> Array:
    """Diagonal over the last two axes (``xp.diagonal`` defaults to the first two)."""
    diag = xp.diagonal(x, offset=offset, axis1=-2, axis2=-1, **kwargs)
    return diag
|
| 208 |
+
|
| 209 |
+
def trace(
    x: Array,
    /,
    xp: Namespace,
    *,
    offset: int = 0,
    dtype: DType | None = None,
    **kwargs: object,
) -> Array:
    """Trace over the last two axes (``xp.trace`` defaults to the first two)."""
    raw = xp.trace(x, offset=offset, dtype=dtype, axis1=-2, axis2=-1, **kwargs)
    # asarray normalizes namespaces that return a scalar into a 0-D array.
    return xp.asarray(raw)
|
| 221 |
+
|
| 222 |
+
# Public API of this module. matmul, matrix_transpose, tensordot and vecdot
# are imported from ._aliases and re-exported here.
__all__ = ['cross', 'matmul', 'outer', 'tensordot', 'EighResult',
           'QRResult', 'SlogdetResult', 'SVDResult', 'eigh', 'qr', 'slogdet',
           'svd', 'cholesky', 'matrix_rank', 'pinv', 'matrix_norm',
           'matrix_transpose', 'svdvals', 'vecdot', 'vector_norm', 'diagonal',
           'trace']


def __dir__() -> list[str]:
    # Restrict dir() of the module to the public API.
    return __all__
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_typing.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections.abc import Mapping
|
| 4 |
+
from types import ModuleType as Namespace
|
| 5 |
+
from typing import (
|
| 6 |
+
TYPE_CHECKING,
|
| 7 |
+
Literal,
|
| 8 |
+
Protocol,
|
| 9 |
+
TypeAlias,
|
| 10 |
+
TypedDict,
|
| 11 |
+
TypeVar,
|
| 12 |
+
final,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from _typeshed import Incomplete
|
| 17 |
+
|
| 18 |
+
SupportsBufferProtocol: TypeAlias = Incomplete
|
| 19 |
+
Array: TypeAlias = Incomplete
|
| 20 |
+
Device: TypeAlias = Incomplete
|
| 21 |
+
DType: TypeAlias = Incomplete
|
| 22 |
+
else:
|
| 23 |
+
SupportsBufferProtocol = object
|
| 24 |
+
Array = object
|
| 25 |
+
Device = object
|
| 26 |
+
DType = object
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
_T_co = TypeVar("_T_co", covariant=True)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# These "Just" types are equivalent to the `Just` type from the `optype` library,
# apart from them not being `@runtime_checkable`.
# - docs: https://github.com/jorenham/optype/blob/master/README.md#just
# - code: https://github.com/jorenham/optype/blob/master/optype/_core/_just.py
@final
class JustInt(Protocol):  # type: ignore[misc]
    """Type that matches ``int`` exactly (per the `optype` ``Just`` trick)."""
    @property  # type: ignore[override]
    def __class__(self, /) -> type[int]: ...
    @__class__.setter
    def __class__(self, value: type[int], /) -> None: ...  # pyright: ignore[reportIncompatibleMethodOverride]


@final
class JustFloat(Protocol):  # type: ignore[misc]
    """Type that matches ``float`` exactly (per the `optype` ``Just`` trick)."""
    @property  # type: ignore[override]
    def __class__(self, /) -> type[float]: ...
    @__class__.setter
    def __class__(self, value: type[float], /) -> None: ...  # pyright: ignore[reportIncompatibleMethodOverride]


@final
class JustComplex(Protocol):  # type: ignore[misc]
    """Type that matches ``complex`` exactly (per the `optype` ``Just`` trick)."""
    @property  # type: ignore[override]
    def __class__(self, /) -> type[complex]: ...
    @__class__.setter
    def __class__(self, value: type[complex], /) -> None: ...  # pyright: ignore[reportIncompatibleMethodOverride]


class NestedSequence(Protocol[_T_co]):
    """Structural type for arbitrarily nested sequences (e.g. lists of lists)."""
    def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
    def __len__(self, /) -> int: ...


class SupportsArrayNamespace(Protocol[_T_co]):
    """Object exposing the array API's ``__array_namespace__`` hook."""
    def __array_namespace__(self, /, *, api_version: str | None) -> _T_co: ...


class HasShape(Protocol[_T_co]):
    """Object with a read-only ``shape`` property."""
    @property
    def shape(self, /) -> _T_co: ...
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# Return type of `__array_namespace_info__.capabilities`
# (NOTE(review): the original comment said `default_dtypes`, which was a
# copy-paste of the comment below and did not match this type's name/fields.)
Capabilities = TypedDict(
    "Capabilities",
    {
        "boolean indexing": bool,
        "data-dependent shapes": bool,
        "max dimensions": int,
    },
)

# Return type of `__array_namespace_info__.default_dtypes`
DefaultDTypes = TypedDict(
    "DefaultDTypes",
    {
        "real floating": DType,
        "complex floating": DType,
        "integral": DType,
        "indexing": DType,
    },
)


_DTypeKind: TypeAlias = Literal[
    "bool",
    "signed integer",
    "unsigned integer",
    "integral",
    "real floating",
    "complex floating",
    "numeric",
]
# Type of the `kind` parameter in `__array_namespace_info__.dtypes`
DTypeKind: TypeAlias = _DTypeKind | tuple[_DTypeKind, ...]


# `__array_namespace_info__.dtypes(kind="bool")`
class DTypesBool(TypedDict):
    bool: DType


# `__array_namespace_info__.dtypes(kind="signed integer")`
class DTypesSigned(TypedDict):
    int8: DType
    int16: DType
    int32: DType
    int64: DType


# `__array_namespace_info__.dtypes(kind="unsigned integer")`
class DTypesUnsigned(TypedDict):
    uint8: DType
    uint16: DType
    uint32: DType
    uint64: DType


# `__array_namespace_info__.dtypes(kind="integral")`
class DTypesIntegral(DTypesSigned, DTypesUnsigned):
    pass


# `__array_namespace_info__.dtypes(kind="real floating")`
class DTypesReal(TypedDict):
    float32: DType
    float64: DType


# `__array_namespace_info__.dtypes(kind="complex floating")`
class DTypesComplex(TypedDict):
    complex64: DType
    complex128: DType


# `__array_namespace_info__.dtypes(kind="numeric")`
class DTypesNumeric(DTypesIntegral, DTypesReal, DTypesComplex):
    pass


# `__array_namespace_info__.dtypes(kind=None)` (default)
class DTypesAll(DTypesBool, DTypesNumeric):
    pass


# `__array_namespace_info__.dtypes(kind=?)` (fallback)
DTypesAny: TypeAlias = Mapping[str, DType]
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
# Public (re-)exports of this typing module.
__all__ = [
    "Array",
    "Capabilities",
    "DType",
    "DTypeKind",
    "DTypesAny",
    "DTypesAll",
    "DTypesBool",
    "DTypesNumeric",
    "DTypesIntegral",
    "DTypesSigned",
    "DTypesUnsigned",
    "DTypesReal",
    "DTypesComplex",
    "DefaultDTypes",
    "Device",
    "HasShape",
    "Namespace",
    "JustInt",
    "JustFloat",
    "JustComplex",
    "NestedSequence",
    "SupportsArrayNamespace",
    "SupportsBufferProtocol",
]


def __dir__() -> list[str]:
    # Restrict dir() of the module to the public API.
    return __all__
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Final
from cupy import *  # noqa: F403

# from cupy import * doesn't overwrite these builtin names
from cupy import abs, max, min, round  # noqa: F401

# These imports may overwrite names from the import * above.
from ._aliases import *  # noqa: F403
from ._info import __array_namespace_info__  # noqa: F401

# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')
__import__(__package__ + '.fft')

# Version of the array API standard this compat namespace targets.
__array_api_version__: Final = '2024.12'

# Everything public re-exported from cupy plus the compat layer, minus
# private helper modules, with the extension submodules added explicitly.
__all__ = sorted(
    {name for name in globals() if not name.startswith("__")}
    - {"Final", "_aliases", "_info", "_typing"}
    | {"__array_api_version__", "__array_namespace_info__", "linalg", "fft"}
)

def __dir__() -> list[str]:
    # Restrict dir() of the module to the public API.
    return __all__
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (1.09 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-312.pyc
ADDED
|
Binary file (8.26 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_info.cpython-312.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-312.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/fft.cpython-312.pyc
ADDED
|
Binary file (1.83 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-312.pyc
ADDED
|
Binary file (2.31 kB). View file
|
|
|
URSA/.venv_ursa/lib/python3.12/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from builtins import bool as py_bool
|
| 4 |
+
|
| 5 |
+
import cupy as cp
|
| 6 |
+
|
| 7 |
+
from ..common import _aliases, _helpers
|
| 8 |
+
from ..common._typing import NestedSequence, SupportsBufferProtocol
|
| 9 |
+
from .._internal import get_xp
|
| 10 |
+
from ._typing import Array, Device, DType
|
| 11 |
+
|
| 12 |
+
# Array API `bool` dtype; cupy only exposes it as bool_.
bool = cp.bool_

# Basic renames
acos = cp.arccos
acosh = cp.arccosh
asin = cp.arcsin
asinh = cp.arcsinh
atan = cp.arctan
atan2 = cp.arctan2
atanh = cp.arctanh
bitwise_left_shift = cp.left_shift
bitwise_invert = cp.invert
bitwise_right_shift = cp.right_shift
concat = cp.concatenate
pow = cp.power

# Generic implementations from common._aliases, bound to the cupy namespace
# via the get_xp decorator.
arange = get_xp(cp)(_aliases.arange)
empty = get_xp(cp)(_aliases.empty)
empty_like = get_xp(cp)(_aliases.empty_like)
eye = get_xp(cp)(_aliases.eye)
full = get_xp(cp)(_aliases.full)
full_like = get_xp(cp)(_aliases.full_like)
linspace = get_xp(cp)(_aliases.linspace)
ones = get_xp(cp)(_aliases.ones)
ones_like = get_xp(cp)(_aliases.ones_like)
zeros = get_xp(cp)(_aliases.zeros)
zeros_like = get_xp(cp)(_aliases.zeros_like)
UniqueAllResult = get_xp(cp)(_aliases.UniqueAllResult)
UniqueCountsResult = get_xp(cp)(_aliases.UniqueCountsResult)
UniqueInverseResult = get_xp(cp)(_aliases.UniqueInverseResult)
unique_all = get_xp(cp)(_aliases.unique_all)
unique_counts = get_xp(cp)(_aliases.unique_counts)
unique_inverse = get_xp(cp)(_aliases.unique_inverse)
unique_values = get_xp(cp)(_aliases.unique_values)
std = get_xp(cp)(_aliases.std)
var = get_xp(cp)(_aliases.var)
cumulative_sum = get_xp(cp)(_aliases.cumulative_sum)
cumulative_prod = get_xp(cp)(_aliases.cumulative_prod)
clip = get_xp(cp)(_aliases.clip)
permute_dims = get_xp(cp)(_aliases.permute_dims)
reshape = get_xp(cp)(_aliases.reshape)
argsort = get_xp(cp)(_aliases.argsort)
sort = get_xp(cp)(_aliases.sort)
nonzero = get_xp(cp)(_aliases.nonzero)
matmul = get_xp(cp)(_aliases.matmul)
matrix_transpose = get_xp(cp)(_aliases.matrix_transpose)
tensordot = get_xp(cp)(_aliases.tensordot)
sign = get_xp(cp)(_aliases.sign)
finfo = get_xp(cp)(_aliases.finfo)
iinfo = get_xp(cp)(_aliases.iinfo)
|
| 63 |
+
|
| 64 |
+
# asarray also adds the copy keyword, which is not present in numpy 1.0.
|
| 65 |
+
# asarray also adds the copy keyword, which is not present in numpy 1.0.
def asarray(
    obj: Array | complex | NestedSequence[complex] | SupportsBufferProtocol,
    /,
    *,
    dtype: DType | None = None,
    device: Device | None = None,
    copy: py_bool | None = None,
    **kwargs: object,
) -> Array:
    """
    Array API compatibility wrapper for asarray().

    See the corresponding documentation in the array library and/or the array API
    specification for more details.
    """
    # Allocate on the requested CUDA device by entering its context.
    with cp.cuda.Device(device):
        if copy is None:
            # copy=None: defer to cp.asarray, which copies only when needed.
            return cp.asarray(obj, dtype=dtype, **kwargs)
        else:
            res = cp.array(obj, dtype=dtype, copy=copy, **kwargs)
            # copy=False is a strict no-copy guarantee: if cp.array had to
            # produce a new object anyway, that is an error.
            if not copy and res is not obj:
                raise ValueError("Unable to avoid copy while creating an array as requested")
            return res
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def astype(
    x: Array,
    dtype: DType,
    /,
    *,
    copy: py_bool = True,
    device: Device | None = None,
) -> Array:
    """Cast ``x`` to ``dtype``, optionally placing the result on ``device``."""
    if device is None:
        return x.astype(dtype=dtype, copy=copy)
    # Convert without copying first, then relocate.  Only duplicate when the
    # round trip handed back the very same object and the caller asked for a
    # copy; otherwise the transfer/cast already produced a distinct array.
    moved = _helpers.to_device(x.astype(dtype=dtype, copy=False), device)
    if copy and moved is x:
        return moved.copy()
    return moved
# cupy.count_nonzero does not have keepdims
def count_nonzero(
    x: Array,
    axis: int | tuple[int, ...] | None = None,
    keepdims: py_bool = False,
) -> Array:
    """Count nonzero elements of ``x``, emulating the ``keepdims`` keyword."""
    counts = cp.count_nonzero(x, axis)
    if not keepdims:
        return counts
    if axis is None:
        # Full reduction: re-introduce one length-1 axis per input dimension.
        return cp.reshape(counts, [1] * x.ndim)
    return cp.expand_dims(counts, axis)
# ceil, floor, and trunc return integers for integer inputs

def ceil(x: Array, /) -> Array:
    """Round toward +infinity; integer inputs come back as an unchanged copy."""
    if not cp.issubdtype(x.dtype, cp.integer):
        return cp.ceil(x)
    # Integers are already "rounded"; copy to avoid aliasing the input.
    return x.copy()
def floor(x: Array, /) -> Array:
    """Round toward -infinity; integer inputs come back as an unchanged copy."""
    if not cp.issubdtype(x.dtype, cp.integer):
        return cp.floor(x)
    # Integers are already "rounded"; copy to avoid aliasing the input.
    return x.copy()
def trunc(x: Array, /) -> Array:
    """Round toward zero; integer inputs come back as an unchanged copy."""
    if not cp.issubdtype(x.dtype, cp.integer):
        return cp.trunc(x)
    # Integers are already "rounded"; copy to avoid aliasing the input.
    return x.copy()
# take_along_axis: axis defaults to -1 but in cupy (and numpy) axis is a required arg
def take_along_axis(x: Array, indices: Array, /, *, axis: int = -1) -> Array:
    """Thin wrapper over ``cp.take_along_axis`` supplying the spec's default axis."""
    return cp.take_along_axis(x, indices, axis=axis)
# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
vecdot = cp.vecdot if hasattr(cp, 'vecdot') else get_xp(cp)(_aliases.vecdot)
isdtype = cp.isdtype if hasattr(cp, 'isdtype') else get_xp(cp)(_aliases.isdtype)
unstack = cp.unstack if hasattr(cp, 'unstack') else get_xp(cp)(_aliases.unstack)
# Public API: everything from the shared aliases plus the cupy-specific
# wrappers defined in this module.  Order is preserved for dir() stability.
__all__ = _aliases.__all__ + [
    'asarray', 'astype',
    'acos', 'acosh', 'asin', 'asinh', 'atan',
    'atan2', 'atanh', 'bitwise_left_shift',
    'bitwise_invert', 'bitwise_right_shift',
    'bool', 'concat', 'count_nonzero', 'pow', 'sign',
    'ceil', 'floor', 'trunc', 'take_along_axis',
]


def __dir__() -> list[str]:
    """Make ``dir()`` on this module report exactly the public API."""
    return __all__