Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- .venv/Lib/site-packages/scipy/misc/face.dat +3 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/__init__.py +208 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/_traversal.cp39-win_amd64.pyd +0 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/_validation.py +61 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py +119 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_conversions.py +61 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_flow.py +201 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py +369 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_matching.py +294 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_pydata_sparse.py +149 -0
- .venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_reordering.py +70 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/__init__.py +146 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/__init__.py +71 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py +153 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cp39-win_amd64.dll.a +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cp39-win_amd64.pyd +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py +746 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +805 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/__init__.py +22 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/_svds.py +545 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/_svds_doc.py +400 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/COPYING +45 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/__init__.py +20 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cp39-win_amd64.dll.a +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cp39-win_amd64.pyd +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/arpack.py +1702 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py +718 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__init__.py +16 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +1112 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py +645 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/tests/__init__.py +0 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_eigen/tests/test_svds.py +862 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_expm_multiply.py +810 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_interface.py +896 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_isolve/__init__.py +20 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_isolve/_gcrotmk.py +514 -0
- .venv/Lib/site-packages/scipy/sparse/linalg/_isolve/iterative.py +1079 -0
.gitattributes
CHANGED
|
@@ -59,3 +59,4 @@ reference_sample_wavs/syuukovoice_200918_3_01.wav filter=lfs diff=lfs merge=lfs
|
|
| 59 |
.venv/Lib/site-packages/scipy/interpolate/_rbfinterp_pythran.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
| 60 |
.venv/Lib/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
| 61 |
.venv/Lib/site-packages/scipy/linalg/_flapack.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 59 |
.venv/Lib/site-packages/scipy/interpolate/_rbfinterp_pythran.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
| 60 |
.venv/Lib/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
| 61 |
.venv/Lib/site-packages/scipy/linalg/_flapack.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
| 62 |
+
.venv/Lib/site-packages/scipy/misc/face.dat filter=lfs diff=lfs merge=lfs -text
|
.venv/Lib/site-packages/scipy/misc/face.dat
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886
|
| 3 |
+
size 1581821
|
.venv/Lib/site-packages/scipy/sparse/csgraph/__init__.py
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""
|
| 2 |
+
Compressed sparse graph routines (:mod:`scipy.sparse.csgraph`)
|
| 3 |
+
==============================================================
|
| 4 |
+
|
| 5 |
+
.. currentmodule:: scipy.sparse.csgraph
|
| 6 |
+
|
| 7 |
+
Fast graph algorithms based on sparse matrix representations.
|
| 8 |
+
|
| 9 |
+
Contents
|
| 10 |
+
--------
|
| 11 |
+
|
| 12 |
+
.. autosummary::
|
| 13 |
+
:toctree: generated/
|
| 14 |
+
|
| 15 |
+
connected_components -- determine connected components of a graph
|
| 16 |
+
laplacian -- compute the laplacian of a graph
|
| 17 |
+
shortest_path -- compute the shortest path between points on a positive graph
|
| 18 |
+
dijkstra -- use Dijkstra's algorithm for shortest path
|
| 19 |
+
floyd_warshall -- use the Floyd-Warshall algorithm for shortest path
|
| 20 |
+
bellman_ford -- use the Bellman-Ford algorithm for shortest path
|
| 21 |
+
johnson -- use Johnson's algorithm for shortest path
|
| 22 |
+
breadth_first_order -- compute a breadth-first order of nodes
|
| 23 |
+
depth_first_order -- compute a depth-first order of nodes
|
| 24 |
+
breadth_first_tree -- construct the breadth-first tree from a given node
|
| 25 |
+
depth_first_tree -- construct a depth-first tree from a given node
|
| 26 |
+
minimum_spanning_tree -- construct the minimum spanning tree of a graph
|
| 27 |
+
reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering
|
| 28 |
+
maximum_flow -- solve the maximum flow problem for a graph
|
| 29 |
+
maximum_bipartite_matching -- compute a maximum matching of a bipartite graph
|
| 30 |
+
min_weight_full_bipartite_matching - compute a minimum weight full matching of a bipartite graph
|
| 31 |
+
structural_rank -- compute the structural rank of a graph
|
| 32 |
+
NegativeCycleError
|
| 33 |
+
|
| 34 |
+
.. autosummary::
|
| 35 |
+
:toctree: generated/
|
| 36 |
+
|
| 37 |
+
construct_dist_matrix
|
| 38 |
+
csgraph_from_dense
|
| 39 |
+
csgraph_from_masked
|
| 40 |
+
csgraph_masked_from_dense
|
| 41 |
+
csgraph_to_dense
|
| 42 |
+
csgraph_to_masked
|
| 43 |
+
reconstruct_path
|
| 44 |
+
|
| 45 |
+
Graph Representations
|
| 46 |
+
---------------------
|
| 47 |
+
This module uses graphs which are stored in a matrix format. A
|
| 48 |
+
graph with N nodes can be represented by an (N x N) adjacency matrix G.
|
| 49 |
+
If there is a connection from node i to node j, then G[i, j] = w, where
|
| 50 |
+
w is the weight of the connection. For nodes i and j which are
|
| 51 |
+
not connected, the value depends on the representation:
|
| 52 |
+
|
| 53 |
+
- for dense array representations, non-edges are represented by
|
| 54 |
+
G[i, j] = 0, infinity, or NaN.
|
| 55 |
+
|
| 56 |
+
- for dense masked representations (of type np.ma.MaskedArray), non-edges
|
| 57 |
+
are represented by masked values. This can be useful when graphs with
|
| 58 |
+
zero-weight edges are desired.
|
| 59 |
+
|
| 60 |
+
- for sparse array representations, non-edges are represented by
|
| 61 |
+
non-entries in the matrix. This sort of sparse representation also
|
| 62 |
+
allows for edges with zero weights.
|
| 63 |
+
|
| 64 |
+
As a concrete example, imagine that you would like to represent the following
|
| 65 |
+
undirected graph::
|
| 66 |
+
|
| 67 |
+
G
|
| 68 |
+
|
| 69 |
+
(0)
|
| 70 |
+
/ \
|
| 71 |
+
1 2
|
| 72 |
+
/ \
|
| 73 |
+
(2) (1)
|
| 74 |
+
|
| 75 |
+
This graph has three nodes, where node 0 and 1 are connected by an edge of
|
| 76 |
+
weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
|
| 77 |
+
We can construct the dense, masked, and sparse representations as follows,
|
| 78 |
+
keeping in mind that an undirected graph is represented by a symmetric matrix::
|
| 79 |
+
|
| 80 |
+
>>> import numpy as np
|
| 81 |
+
>>> G_dense = np.array([[0, 2, 1],
|
| 82 |
+
... [2, 0, 0],
|
| 83 |
+
... [1, 0, 0]])
|
| 84 |
+
>>> G_masked = np.ma.masked_values(G_dense, 0)
|
| 85 |
+
>>> from scipy.sparse import csr_matrix
|
| 86 |
+
>>> G_sparse = csr_matrix(G_dense)
|
| 87 |
+
|
| 88 |
+
This becomes more difficult when zero edges are significant. For example,
|
| 89 |
+
consider the situation when we slightly modify the above graph::
|
| 90 |
+
|
| 91 |
+
G2
|
| 92 |
+
|
| 93 |
+
(0)
|
| 94 |
+
/ \
|
| 95 |
+
0 2
|
| 96 |
+
/ \
|
| 97 |
+
(2) (1)
|
| 98 |
+
|
| 99 |
+
This is identical to the previous graph, except nodes 0 and 2 are connected
|
| 100 |
+
by an edge of zero weight. In this case, the dense representation above
|
| 101 |
+
leads to ambiguities: how can non-edges be represented if zero is a meaningful
|
| 102 |
+
value? In this case, either a masked or sparse representation must be used
|
| 103 |
+
to eliminate the ambiguity::
|
| 104 |
+
|
| 105 |
+
>>> import numpy as np
|
| 106 |
+
>>> G2_data = np.array([[np.inf, 2, 0 ],
|
| 107 |
+
... [2, np.inf, np.inf],
|
| 108 |
+
... [0, np.inf, np.inf]])
|
| 109 |
+
>>> G2_masked = np.ma.masked_invalid(G2_data)
|
| 110 |
+
>>> from scipy.sparse.csgraph import csgraph_from_dense
|
| 111 |
+
>>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
|
| 112 |
+
>>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
|
| 113 |
+
>>> G2_sparse.data
|
| 114 |
+
array([ 2., 0., 2., 0.])
|
| 115 |
+
|
| 116 |
+
Here we have used a utility routine from the csgraph submodule in order to
|
| 117 |
+
convert the dense representation to a sparse representation which can be
|
| 118 |
+
understood by the algorithms in submodule. By viewing the data array, we
|
| 119 |
+
can see that the zero values are explicitly encoded in the graph.
|
| 120 |
+
|
| 121 |
+
Directed vs. undirected
|
| 122 |
+
^^^^^^^^^^^^^^^^^^^^^^^
|
| 123 |
+
Matrices may represent either directed or undirected graphs. This is
|
| 124 |
+
specified throughout the csgraph module by a boolean keyword. Graphs are
|
| 125 |
+
assumed to be directed by default. In a directed graph, traversal from node
|
| 126 |
+
i to node j can be accomplished over the edge G[i, j], but not the edge
|
| 127 |
+
G[j, i]. Consider the following dense graph::
|
| 128 |
+
|
| 129 |
+
>>> import numpy as np
|
| 130 |
+
>>> G_dense = np.array([[0, 1, 0],
|
| 131 |
+
... [2, 0, 3],
|
| 132 |
+
... [0, 4, 0]])
|
| 133 |
+
|
| 134 |
+
When ``directed=True`` we get the graph::
|
| 135 |
+
|
| 136 |
+
---1--> ---3-->
|
| 137 |
+
(0) (1) (2)
|
| 138 |
+
<--2--- <--4---
|
| 139 |
+
|
| 140 |
+
In a non-directed graph, traversal from node i to node j can be
|
| 141 |
+
accomplished over either G[i, j] or G[j, i]. If both edges are not null,
|
| 142 |
+
and the two have unequal weights, then the smaller of the two is used.
|
| 143 |
+
|
| 144 |
+
So for the same graph, when ``directed=False`` we get the graph::
|
| 145 |
+
|
| 146 |
+
(0)--1--(1)--3--(2)
|
| 147 |
+
|
| 148 |
+
Note that a symmetric matrix will represent an undirected graph, regardless
|
| 149 |
+
of whether the 'directed' keyword is set to True or False. In this case,
|
| 150 |
+
using ``directed=True`` generally leads to more efficient computation.
|
| 151 |
+
|
| 152 |
+
The routines in this module accept as input either scipy.sparse representations
|
| 153 |
+
(csr, csc, or lil format), masked representations, or dense representations
|
| 154 |
+
with non-edges indicated by zeros, infinities, and NaN entries.
|
| 155 |
+
""" # noqa: E501
|
| 156 |
+
|
| 157 |
+
__docformat__ = "restructuredtext en"
|
| 158 |
+
|
| 159 |
+
__all__ = ['connected_components',
|
| 160 |
+
'laplacian',
|
| 161 |
+
'shortest_path',
|
| 162 |
+
'floyd_warshall',
|
| 163 |
+
'dijkstra',
|
| 164 |
+
'bellman_ford',
|
| 165 |
+
'johnson',
|
| 166 |
+
'breadth_first_order',
|
| 167 |
+
'depth_first_order',
|
| 168 |
+
'breadth_first_tree',
|
| 169 |
+
'depth_first_tree',
|
| 170 |
+
'minimum_spanning_tree',
|
| 171 |
+
'reverse_cuthill_mckee',
|
| 172 |
+
'maximum_flow',
|
| 173 |
+
'maximum_bipartite_matching',
|
| 174 |
+
'min_weight_full_bipartite_matching',
|
| 175 |
+
'structural_rank',
|
| 176 |
+
'construct_dist_matrix',
|
| 177 |
+
'reconstruct_path',
|
| 178 |
+
'csgraph_masked_from_dense',
|
| 179 |
+
'csgraph_from_dense',
|
| 180 |
+
'csgraph_from_masked',
|
| 181 |
+
'csgraph_to_dense',
|
| 182 |
+
'csgraph_to_masked',
|
| 183 |
+
'NegativeCycleError']
|
| 184 |
+
|
| 185 |
+
from ._laplacian import laplacian
|
| 186 |
+
from ._shortest_path import (
|
| 187 |
+
shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson,
|
| 188 |
+
NegativeCycleError
|
| 189 |
+
)
|
| 190 |
+
from ._traversal import (
|
| 191 |
+
breadth_first_order, depth_first_order, breadth_first_tree,
|
| 192 |
+
depth_first_tree, connected_components
|
| 193 |
+
)
|
| 194 |
+
from ._min_spanning_tree import minimum_spanning_tree
|
| 195 |
+
from ._flow import maximum_flow
|
| 196 |
+
from ._matching import (
|
| 197 |
+
maximum_bipartite_matching, min_weight_full_bipartite_matching
|
| 198 |
+
)
|
| 199 |
+
from ._reordering import reverse_cuthill_mckee, structural_rank
|
| 200 |
+
from ._tools import (
|
| 201 |
+
construct_dist_matrix, reconstruct_path, csgraph_from_dense,
|
| 202 |
+
csgraph_to_dense, csgraph_masked_from_dense, csgraph_from_masked,
|
| 203 |
+
csgraph_to_masked
|
| 204 |
+
)
|
| 205 |
+
|
| 206 |
+
from scipy._lib._testutils import PytestTester
|
| 207 |
+
test = PytestTester(__name__)
|
| 208 |
+
del PytestTester
|
.venv/Lib/site-packages/scipy/sparse/csgraph/_traversal.cp39-win_amd64.pyd
ADDED
|
Binary file (625 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/csgraph/_validation.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy.sparse import csr_matrix, issparse
|
| 3 |
+
from scipy.sparse._sputils import convert_pydata_sparse_to_scipy
|
| 4 |
+
from scipy.sparse.csgraph._tools import (
|
| 5 |
+
csgraph_to_dense, csgraph_from_dense,
|
| 6 |
+
csgraph_masked_from_dense, csgraph_from_masked
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
DTYPE = np.float64
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def validate_graph(csgraph, directed, dtype=DTYPE,
|
| 13 |
+
csr_output=True, dense_output=True,
|
| 14 |
+
copy_if_dense=False, copy_if_sparse=False,
|
| 15 |
+
null_value_in=0, null_value_out=np.inf,
|
| 16 |
+
infinity_null=True, nan_null=True):
|
| 17 |
+
"""Routine for validation and conversion of csgraph inputs"""
|
| 18 |
+
if not (csr_output or dense_output):
|
| 19 |
+
raise ValueError("Internal: dense or csr output must be true")
|
| 20 |
+
|
| 21 |
+
csgraph = convert_pydata_sparse_to_scipy(csgraph)
|
| 22 |
+
|
| 23 |
+
# if undirected and csc storage, then transposing in-place
|
| 24 |
+
# is quicker than later converting to csr.
|
| 25 |
+
if (not directed) and issparse(csgraph) and csgraph.format == "csc":
|
| 26 |
+
csgraph = csgraph.T
|
| 27 |
+
|
| 28 |
+
if issparse(csgraph):
|
| 29 |
+
if csr_output:
|
| 30 |
+
csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)
|
| 31 |
+
else:
|
| 32 |
+
csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
|
| 33 |
+
elif np.ma.isMaskedArray(csgraph):
|
| 34 |
+
if dense_output:
|
| 35 |
+
mask = csgraph.mask
|
| 36 |
+
csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
|
| 37 |
+
csgraph[mask] = null_value_out
|
| 38 |
+
else:
|
| 39 |
+
csgraph = csgraph_from_masked(csgraph)
|
| 40 |
+
else:
|
| 41 |
+
if dense_output:
|
| 42 |
+
csgraph = csgraph_masked_from_dense(csgraph,
|
| 43 |
+
copy=copy_if_dense,
|
| 44 |
+
null_value=null_value_in,
|
| 45 |
+
nan_null=nan_null,
|
| 46 |
+
infinity_null=infinity_null)
|
| 47 |
+
mask = csgraph.mask
|
| 48 |
+
csgraph = np.asarray(csgraph.data, dtype=DTYPE)
|
| 49 |
+
csgraph[mask] = null_value_out
|
| 50 |
+
else:
|
| 51 |
+
csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
|
| 52 |
+
infinity_null=infinity_null,
|
| 53 |
+
nan_null=nan_null)
|
| 54 |
+
|
| 55 |
+
if csgraph.ndim != 2:
|
| 56 |
+
raise ValueError("compressed-sparse graph must be 2-D")
|
| 57 |
+
|
| 58 |
+
if csgraph.shape[0] != csgraph.shape[1]:
|
| 59 |
+
raise ValueError("compressed-sparse graph must be shape (N, N)")
|
| 60 |
+
|
| 61 |
+
return csgraph
|
.venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import assert_equal, assert_array_almost_equal
|
| 3 |
+
from scipy.sparse import csgraph, csr_array
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def test_weak_connections():
|
| 7 |
+
Xde = np.array([[0, 1, 0],
|
| 8 |
+
[0, 0, 0],
|
| 9 |
+
[0, 0, 0]])
|
| 10 |
+
|
| 11 |
+
Xsp = csgraph.csgraph_from_dense(Xde, null_value=0)
|
| 12 |
+
|
| 13 |
+
for X in Xsp, Xde:
|
| 14 |
+
n_components, labels =\
|
| 15 |
+
csgraph.connected_components(X, directed=True,
|
| 16 |
+
connection='weak')
|
| 17 |
+
|
| 18 |
+
assert_equal(n_components, 2)
|
| 19 |
+
assert_array_almost_equal(labels, [0, 0, 1])
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def test_strong_connections():
|
| 23 |
+
X1de = np.array([[0, 1, 0],
|
| 24 |
+
[0, 0, 0],
|
| 25 |
+
[0, 0, 0]])
|
| 26 |
+
X2de = X1de + X1de.T
|
| 27 |
+
|
| 28 |
+
X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
|
| 29 |
+
X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)
|
| 30 |
+
|
| 31 |
+
for X in X1sp, X1de:
|
| 32 |
+
n_components, labels =\
|
| 33 |
+
csgraph.connected_components(X, directed=True,
|
| 34 |
+
connection='strong')
|
| 35 |
+
|
| 36 |
+
assert_equal(n_components, 3)
|
| 37 |
+
labels.sort()
|
| 38 |
+
assert_array_almost_equal(labels, [0, 1, 2])
|
| 39 |
+
|
| 40 |
+
for X in X2sp, X2de:
|
| 41 |
+
n_components, labels =\
|
| 42 |
+
csgraph.connected_components(X, directed=True,
|
| 43 |
+
connection='strong')
|
| 44 |
+
|
| 45 |
+
assert_equal(n_components, 2)
|
| 46 |
+
labels.sort()
|
| 47 |
+
assert_array_almost_equal(labels, [0, 0, 1])
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def test_strong_connections2():
|
| 51 |
+
X = np.array([[0, 0, 0, 0, 0, 0],
|
| 52 |
+
[1, 0, 1, 0, 0, 0],
|
| 53 |
+
[0, 0, 0, 1, 0, 0],
|
| 54 |
+
[0, 0, 1, 0, 1, 0],
|
| 55 |
+
[0, 0, 0, 0, 0, 0],
|
| 56 |
+
[0, 0, 0, 0, 1, 0]])
|
| 57 |
+
n_components, labels =\
|
| 58 |
+
csgraph.connected_components(X, directed=True,
|
| 59 |
+
connection='strong')
|
| 60 |
+
assert_equal(n_components, 5)
|
| 61 |
+
labels.sort()
|
| 62 |
+
assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4])
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def test_weak_connections2():
|
| 66 |
+
X = np.array([[0, 0, 0, 0, 0, 0],
|
| 67 |
+
[1, 0, 0, 0, 0, 0],
|
| 68 |
+
[0, 0, 0, 1, 0, 0],
|
| 69 |
+
[0, 0, 1, 0, 1, 0],
|
| 70 |
+
[0, 0, 0, 0, 0, 0],
|
| 71 |
+
[0, 0, 0, 0, 1, 0]])
|
| 72 |
+
n_components, labels =\
|
| 73 |
+
csgraph.connected_components(X, directed=True,
|
| 74 |
+
connection='weak')
|
| 75 |
+
assert_equal(n_components, 2)
|
| 76 |
+
labels.sort()
|
| 77 |
+
assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1])
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def test_ticket1876():
|
| 81 |
+
# Regression test: this failed in the original implementation
|
| 82 |
+
# There should be two strongly-connected components; previously gave one
|
| 83 |
+
g = np.array([[0, 1, 1, 0],
|
| 84 |
+
[1, 0, 0, 1],
|
| 85 |
+
[0, 0, 0, 1],
|
| 86 |
+
[0, 0, 1, 0]])
|
| 87 |
+
n_components, labels = csgraph.connected_components(g, connection='strong')
|
| 88 |
+
|
| 89 |
+
assert_equal(n_components, 2)
|
| 90 |
+
assert_equal(labels[0], labels[1])
|
| 91 |
+
assert_equal(labels[2], labels[3])
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def test_fully_connected_graph():
|
| 95 |
+
# Fully connected dense matrices raised an exception.
|
| 96 |
+
# https://github.com/scipy/scipy/issues/3818
|
| 97 |
+
g = np.ones((4, 4))
|
| 98 |
+
n_components, labels = csgraph.connected_components(g)
|
| 99 |
+
assert_equal(n_components, 1)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def test_int64_indices_undirected():
|
| 103 |
+
# See https://github.com/scipy/scipy/issues/18716
|
| 104 |
+
g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2))
|
| 105 |
+
assert g.indices.dtype == np.int64
|
| 106 |
+
n, labels = csgraph.connected_components(g, directed=False)
|
| 107 |
+
assert n == 1
|
| 108 |
+
assert_array_almost_equal(labels, [0, 0])
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def test_int64_indices_directed():
|
| 112 |
+
# See https://github.com/scipy/scipy/issues/18716
|
| 113 |
+
g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2))
|
| 114 |
+
assert g.indices.dtype == np.int64
|
| 115 |
+
n, labels = csgraph.connected_components(g, directed=True,
|
| 116 |
+
connection='strong')
|
| 117 |
+
assert n == 2
|
| 118 |
+
assert_array_almost_equal(labels, [1, 0])
|
| 119 |
+
|
.venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_conversions.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import assert_array_almost_equal
|
| 3 |
+
from scipy.sparse import csr_matrix
|
| 4 |
+
from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_csgraph_from_dense():
|
| 8 |
+
np.random.seed(1234)
|
| 9 |
+
G = np.random.random((10, 10))
|
| 10 |
+
some_nulls = (G < 0.4)
|
| 11 |
+
all_nulls = (G < 0.8)
|
| 12 |
+
|
| 13 |
+
for null_value in [0, np.nan, np.inf]:
|
| 14 |
+
G[all_nulls] = null_value
|
| 15 |
+
with np.errstate(invalid="ignore"):
|
| 16 |
+
G_csr = csgraph_from_dense(G, null_value=0)
|
| 17 |
+
|
| 18 |
+
G[all_nulls] = 0
|
| 19 |
+
assert_array_almost_equal(G, G_csr.toarray())
|
| 20 |
+
|
| 21 |
+
for null_value in [np.nan, np.inf]:
|
| 22 |
+
G[all_nulls] = 0
|
| 23 |
+
G[some_nulls] = null_value
|
| 24 |
+
with np.errstate(invalid="ignore"):
|
| 25 |
+
G_csr = csgraph_from_dense(G, null_value=0)
|
| 26 |
+
|
| 27 |
+
G[all_nulls] = 0
|
| 28 |
+
assert_array_almost_equal(G, G_csr.toarray())
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def test_csgraph_to_dense():
|
| 32 |
+
np.random.seed(1234)
|
| 33 |
+
G = np.random.random((10, 10))
|
| 34 |
+
nulls = (G < 0.8)
|
| 35 |
+
G[nulls] = np.inf
|
| 36 |
+
|
| 37 |
+
G_csr = csgraph_from_dense(G)
|
| 38 |
+
|
| 39 |
+
for null_value in [0, 10, -np.inf, np.inf]:
|
| 40 |
+
G[nulls] = null_value
|
| 41 |
+
assert_array_almost_equal(G, csgraph_to_dense(G_csr, null_value))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def test_multiple_edges():
|
| 45 |
+
# create a random square matrix with an even number of elements
|
| 46 |
+
np.random.seed(1234)
|
| 47 |
+
X = np.random.random((10, 10))
|
| 48 |
+
Xcsr = csr_matrix(X)
|
| 49 |
+
|
| 50 |
+
# now double-up every other column
|
| 51 |
+
Xcsr.indices[::2] = Xcsr.indices[1::2]
|
| 52 |
+
|
| 53 |
+
# normal sparse toarray() will sum the duplicated edges
|
| 54 |
+
Xdense = Xcsr.toarray()
|
| 55 |
+
assert_array_almost_equal(Xdense[:, 1::2],
|
| 56 |
+
X[:, ::2] + X[:, 1::2])
|
| 57 |
+
|
| 58 |
+
# csgraph_to_dense chooses the minimum of each duplicated edge
|
| 59 |
+
Xdense = csgraph_to_dense(Xcsr)
|
| 60 |
+
assert_array_almost_equal(Xdense[:, 1::2],
|
| 61 |
+
np.minimum(X[:, ::2], X[:, 1::2]))
|
.venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_flow.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import assert_array_equal
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
from scipy.sparse import csr_matrix, csc_matrix
|
| 6 |
+
from scipy.sparse.csgraph import maximum_flow
|
| 7 |
+
from scipy.sparse.csgraph._flow import (
|
| 8 |
+
_add_reverse_edges, _make_edge_pointers, _make_tails
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
methods = ['edmonds_karp', 'dinic']
|
| 12 |
+
|
| 13 |
+
def test_raises_on_dense_input():
|
| 14 |
+
with pytest.raises(TypeError):
|
| 15 |
+
graph = np.array([[0, 1], [0, 0]])
|
| 16 |
+
maximum_flow(graph, 0, 1)
|
| 17 |
+
maximum_flow(graph, 0, 1, method='edmonds_karp')
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def test_raises_on_csc_input():
|
| 21 |
+
with pytest.raises(TypeError):
|
| 22 |
+
graph = csc_matrix([[0, 1], [0, 0]])
|
| 23 |
+
maximum_flow(graph, 0, 1)
|
| 24 |
+
maximum_flow(graph, 0, 1, method='edmonds_karp')
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def test_raises_on_floating_point_input():
|
| 28 |
+
with pytest.raises(ValueError):
|
| 29 |
+
graph = csr_matrix([[0, 1.5], [0, 0]], dtype=np.float64)
|
| 30 |
+
maximum_flow(graph, 0, 1)
|
| 31 |
+
maximum_flow(graph, 0, 1, method='edmonds_karp')
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_raises_on_non_square_input():
|
| 35 |
+
with pytest.raises(ValueError):
|
| 36 |
+
graph = csr_matrix([[0, 1, 2], [2, 1, 0]])
|
| 37 |
+
maximum_flow(graph, 0, 1)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def test_raises_when_source_is_sink():
|
| 41 |
+
with pytest.raises(ValueError):
|
| 42 |
+
graph = csr_matrix([[0, 1], [0, 0]])
|
| 43 |
+
maximum_flow(graph, 0, 0)
|
| 44 |
+
maximum_flow(graph, 0, 0, method='edmonds_karp')
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@pytest.mark.parametrize('method', methods)
|
| 48 |
+
@pytest.mark.parametrize('source', [-1, 2, 3])
|
| 49 |
+
def test_raises_when_source_is_out_of_bounds(source, method):
|
| 50 |
+
with pytest.raises(ValueError):
|
| 51 |
+
graph = csr_matrix([[0, 1], [0, 0]])
|
| 52 |
+
maximum_flow(graph, source, 1, method=method)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@pytest.mark.parametrize('method', methods)
|
| 56 |
+
@pytest.mark.parametrize('sink', [-1, 2, 3])
|
| 57 |
+
def test_raises_when_sink_is_out_of_bounds(sink, method):
|
| 58 |
+
with pytest.raises(ValueError):
|
| 59 |
+
graph = csr_matrix([[0, 1], [0, 0]])
|
| 60 |
+
maximum_flow(graph, 0, sink, method=method)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@pytest.mark.parametrize('method', methods)
|
| 64 |
+
def test_simple_graph(method):
|
| 65 |
+
# This graph looks as follows:
|
| 66 |
+
# (0) --5--> (1)
|
| 67 |
+
graph = csr_matrix([[0, 5], [0, 0]])
|
| 68 |
+
res = maximum_flow(graph, 0, 1, method=method)
|
| 69 |
+
assert res.flow_value == 5
|
| 70 |
+
expected_flow = np.array([[0, 5], [-5, 0]])
|
| 71 |
+
assert_array_equal(res.flow.toarray(), expected_flow)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@pytest.mark.parametrize('method', methods)
|
| 75 |
+
def test_bottle_neck_graph(method):
|
| 76 |
+
# This graph cannot use the full capacity between 0 and 1:
|
| 77 |
+
# (0) --5--> (1) --3--> (2)
|
| 78 |
+
graph = csr_matrix([[0, 5, 0], [0, 0, 3], [0, 0, 0]])
|
| 79 |
+
res = maximum_flow(graph, 0, 2, method=method)
|
| 80 |
+
assert res.flow_value == 3
|
| 81 |
+
expected_flow = np.array([[0, 3, 0], [-3, 0, 3], [0, -3, 0]])
|
| 82 |
+
assert_array_equal(res.flow.toarray(), expected_flow)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@pytest.mark.parametrize('method', methods)
def test_backwards_flow(method):
    """Flow must be pushed backwards between vertices 3 and 4.

    See https://stackoverflow.com/q/38843963/5085211 for more information.
    """
    capacities = np.array([[0, 10, 0, 0, 10, 0, 0, 0],
                           [0, 0, 10, 0, 0, 0, 0, 0],
                           [0, 0, 0, 10, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 10],
                           [0, 0, 0, 10, 0, 10, 0, 0],
                           [0, 0, 0, 0, 0, 0, 10, 0],
                           [0, 0, 0, 0, 0, 0, 0, 10],
                           [0, 0, 0, 0, 0, 0, 0, 0]])
    res = maximum_flow(csr_matrix(capacities), 0, 7, method=method)
    assert res.flow_value == 20
    expected_flow = np.array([[0, 10, 0, 0, 10, 0, 0, 0],
                              [-10, 0, 10, 0, 0, 0, 0, 0],
                              [0, -10, 0, 10, 0, 0, 0, 0],
                              [0, 0, -10, 0, 0, 0, 0, 10],
                              [-10, 0, 0, 0, 0, 10, 0, 0],
                              [0, 0, 0, 0, -10, 0, 10, 0],
                              [0, 0, 0, 0, 0, -10, 0, 10],
                              [0, 0, 0, -10, 0, 0, -10, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
@pytest.mark.parametrize('method', methods)
def test_example_from_clrs_chapter_26_1(method):
    """CLRS (2nd ed.) page 659 example.

    The maximum flow found here differs slightly from the book's: a flow of
    12 is pushed to v_1 instead of v_2, but the flow value is the same.
    """
    capacities = np.array([[0, 16, 13, 0, 0, 0],
                           [0, 0, 10, 12, 0, 0],
                           [0, 4, 0, 0, 14, 0],
                           [0, 0, 9, 0, 0, 20],
                           [0, 0, 0, 7, 0, 4],
                           [0, 0, 0, 0, 0, 0]])
    res = maximum_flow(csr_matrix(capacities), 0, 5, method=method)
    assert res.flow_value == 23
    expected_flow = np.array([[0, 12, 11, 0, 0, 0],
                              [-12, 0, 0, 12, 0, 0],
                              [-11, 0, 0, 0, 11, 0],
                              [0, -12, 0, 0, -7, 19],
                              [0, 0, -11, 7, 0, 4],
                              [0, 0, 0, -19, -4, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@pytest.mark.parametrize('method', methods)
def test_disconnected_graph(method):
    """No path from source to sink:  (0) --5--> (1)   (2) --3--> (3)."""
    graph = csr_matrix([[0, 5, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 9, 3],
                        [0, 0, 0, 0]])
    res = maximum_flow(graph, 0, 3, method=method)
    assert res.flow_value == 0
    assert_array_equal(res.flow.toarray(), np.zeros((4, 4), dtype=np.int32))
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
@pytest.mark.parametrize('method', methods)
def test_add_reverse_edges_large_graph(method):
    """Regression test for https://github.com/scipy/scipy/issues/14385."""
    # A path graph 0 -> 1 -> ... -> n-1 with unit capacities.
    n = 100_000
    indices = np.arange(1, n)
    indptr = np.append(np.arange(n), n - 1)
    data = np.ones(n - 1, dtype=np.int32)
    graph = csr_matrix((data, indices, indptr), shape=(n, n))
    res = maximum_flow(graph, 0, n - 1, method=method)
    assert res.flow_value == 1
    expected_flow = graph - graph.transpose()
    assert_array_equal(res.flow.data, expected_flow.data)
    assert_array_equal(res.flow.indices, expected_flow.indices)
    assert_array_equal(res.flow.indptr, expected_flow.indptr)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
@pytest.mark.parametrize("a,b_data_expected", [
    ([[]], []),
    ([[0], [0]], []),
    ([[1, 0, 2], [0, 0, 0], [0, 3, 0]], [1, 2, 0, 0, 3]),
    ([[9, 8, 7], [4, 5, 6], [0, 0, 0]], [9, 8, 7, 4, 5, 6, 0, 0])])
def test_add_reverse_edges(a, b_data_expected):
    """The edge-reversal helper yields the expected CSR data array."""
    square = csr_matrix(a, dtype=np.int32, shape=(len(a), len(a)))
    augmented = _add_reverse_edges(square)
    assert_array_equal(augmented.data, b_data_expected)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
@pytest.mark.parametrize("a,expected", [
    ([[]], []),
    ([[0]], []),
    ([[1]], [0]),
    ([[0, 1], [10, 0]], [1, 0]),
    ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 3, 4, 1, 2])
])
def test_make_edge_pointers(a, expected):
    """Each edge's reverse-edge pointer is computed correctly."""
    graph = csr_matrix(a, dtype=np.int32)
    assert_array_equal(_make_edge_pointers(graph), expected)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
@pytest.mark.parametrize("a,expected", [
    ([[]], []),
    ([[0]], []),
    ([[1]], [0]),
    ([[0, 1], [10, 0]], [0, 1]),
    ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 0, 1, 2, 2])
])
def test_make_tails(a, expected):
    """The tail (source vertex) of every edge is computed correctly."""
    graph = csr_matrix(a, dtype=np.int32)
    assert_array_equal(_make_tails(graph), expected)
|
.venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py
ADDED
|
@@ -0,0 +1,369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
import numpy as np
|
| 3 |
+
from numpy.testing import assert_allclose
|
| 4 |
+
from pytest import raises as assert_raises
|
| 5 |
+
from scipy import sparse
|
| 6 |
+
|
| 7 |
+
from scipy.sparse import csgraph
|
| 8 |
+
from scipy._lib._util import np_long, np_ulong
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def check_int_type(mat):
|
| 12 |
+
return np.issubdtype(mat.dtype, np.signedinteger) or np.issubdtype(
|
| 13 |
+
mat.dtype, np_ulong
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def test_laplacian_value_error():
    """Inputs that are not square 2-D matrices must raise ValueError."""
    bad_shapes = ([1, 1],
                  [[[1]]],
                  [[1, 2, 3], [4, 5, 6]],
                  [[1, 2], [3, 4], [5, 5]])
    for t in (int, float, complex):
        for m in bad_shapes:
            A = np.array(m, dtype=t)
            assert_raises(ValueError, csgraph.laplacian, A)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _explicit_laplacian(x, normed=False):
|
| 28 |
+
if sparse.issparse(x):
|
| 29 |
+
x = x.toarray()
|
| 30 |
+
x = np.asarray(x)
|
| 31 |
+
y = -1.0 * x
|
| 32 |
+
for j in range(y.shape[0]):
|
| 33 |
+
y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
|
| 34 |
+
if normed:
|
| 35 |
+
d = np.diag(y).copy()
|
| 36 |
+
d[d == 0] = 1.0
|
| 37 |
+
y /= d[:,None]**.5
|
| 38 |
+
y /= d[None,:]**.5
|
| 39 |
+
return y
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _check_symmetric_graph_laplacian(mat, normed, copy=True):
    """Compare ``csgraph.laplacian`` against the explicit reference.

    ``mat`` may be an array, a sparse matrix, or a string to ``eval``.
    """
    if not hasattr(mat, 'shape'):
        mat = eval(mat, dict(np=np, sparse=sparse))

    if sparse.issparse(mat):
        sp_mat = mat
        mat = sp_mat.toarray()
    else:
        sp_mat = sparse.csr_matrix(mat)

    # Keep pristine copies so the ``copy=True`` contract can be verified.
    mat_copy = np.copy(mat)
    sp_mat_copy = sparse.csr_matrix(sp_mat, copy=True)

    n_nodes = mat.shape[0]
    explicit_laplacian = _explicit_laplacian(mat, normed=normed)
    laplacian = csgraph.laplacian(mat, normed=normed, copy=copy)
    sp_laplacian = csgraph.laplacian(sp_mat, normed=normed, copy=copy)

    if copy:
        # The inputs must be left untouched.
        assert_allclose(mat, mat_copy)
        _assert_allclose_sparse(sp_mat, sp_mat_copy)
    elif not (normed and check_int_type(mat)):
        # Without a copy the input storage should now hold the Laplacian.
        assert_allclose(laplacian, mat)
        if sp_mat.format == 'coo':
            _assert_allclose_sparse(sp_laplacian, sp_mat)

    assert_allclose(laplacian, sp_laplacian.toarray())

    for candidate in (laplacian, sp_laplacian.toarray()):
        if not normed:
            # Unnormalized Laplacian columns sum to zero.
            assert_allclose(candidate.sum(axis=0), np.zeros(n_nodes))
        assert_allclose(candidate.T, candidate)
        assert_allclose(candidate, explicit_laplacian)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def test_symmetric_graph_laplacian():
    """Exercise the symmetric-Laplacian checker over assorted inputs."""
    symmetric_mats = (
        'np.arange(10) * np.arange(10)[:, np.newaxis]',
        'np.ones((7, 7))',
        'np.eye(19)',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4))',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).toarray()',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).todense()',
        'np.vander(np.arange(4)) + np.vander(np.arange(4)).T',
    )
    for mat in symmetric_mats:
        for normed in (True, False):
            for copy in (True, False):
                _check_symmetric_graph_laplacian(mat, normed, copy)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def _assert_allclose_sparse(a, b, **kwargs):
|
| 96 |
+
# helper function that can deal with sparse matrices
|
| 97 |
+
if sparse.issparse(a):
|
| 98 |
+
a = a.toarray()
|
| 99 |
+
if sparse.issparse(b):
|
| 100 |
+
b = b.toarray()
|
| 101 |
+
assert_allclose(a, b, **kwargs)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _check_laplacian_dtype_none(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """Check dtype handling of ``laplacian`` when called with ``dtype=None``."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=None,
    )
    if normed and check_int_type(mat):
        # Normalization promotes integer input to float64.
        assert L.dtype == np.float64
        assert d.dtype == np.float64
        _assert_allclose_sparse(L, desired_L, atol=1e-12)
        _assert_allclose_sparse(d, desired_d, atol=1e-12)
    else:
        # Otherwise the input dtype is preserved.
        assert L.dtype == dtype
        assert d.dtype == dtype
        _assert_allclose_sparse(L, np.asarray(desired_L).astype(dtype),
                                atol=1e-12)
        _assert_allclose_sparse(d, np.asarray(desired_d).astype(dtype),
                                atol=1e-12)

    if not copy and not (normed and check_int_type(mat)):
        # Without copy the input storage should now hold the Laplacian.
        if type(mat) is np.ndarray:
            assert_allclose(L, mat)
        elif mat.format == "coo":
            _assert_allclose_sparse(L, mat)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def _check_laplacian_dtype(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """Check that an explicit ``dtype`` request is honored by ``laplacian``."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
    )
    assert L.dtype == dtype
    assert d.dtype == dtype
    _assert_allclose_sparse(L, np.asarray(desired_L).astype(dtype), atol=1e-12)
    _assert_allclose_sparse(d, np.asarray(desired_d).astype(dtype), atol=1e-12)

    if not copy and not (normed and check_int_type(mat)):
        # Without copy the input storage should now hold the Laplacian.
        if type(mat) is np.ndarray:
            assert_allclose(L, mat)
        elif mat.format == 'coo':
            _assert_allclose_sparse(L, mat)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
INT_DTYPES = {np.intc, np_long, np.longlong}
|
| 165 |
+
REAL_DTYPES = {np.float32, np.float64, np.longdouble}
|
| 166 |
+
COMPLEX_DTYPES = {np.complex64, np.complex128, np.clongdouble}
|
| 167 |
+
# use sorted list to ensure fixed order of tests
|
| 168 |
+
DTYPES = sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("arr_type", [np.array,
                                      sparse.csr_matrix,
                                      sparse.coo_matrix,
                                      sparse.csr_array,
                                      sparse.coo_array])
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
def test_asymmetric_laplacian(use_out_degree, normed,
                              copy, dtype, arr_type):
    """Laplacian of a directed graph for all degree/normalization choices."""
    # Directed adjacency matrix (note the self-loop at vertex 1).
    A = arr_type(np.array([[0, 1, 0],
                           [4, 2, 0],
                           [0, 0, 0]]), dtype=dtype)
    A_copy = A.copy()

    if use_out_degree and not normed:
        # Laplacian matrix using out-degree.
        L = [[1, -1, 0], [-4, 4, 0], [0, 0, 0]]
        d = [1, 4, 0]
    elif use_out_degree and normed:
        # Normalized Laplacian matrix using out-degree.
        L = [[1, -0.5, 0], [-2, 1, 0], [0, 0, 0]]
        d = [1, 2, 1]
    elif not use_out_degree and not normed:
        # Laplacian matrix using in-degree.
        L = [[4, -1, 0], [-4, 1, 0], [0, 0, 0]]
        d = [4, 1, 0]
    else:
        # Normalized Laplacian matrix using in-degree.
        L = [[1, -0.5, 0], [-2, 1, 0], [0, 0, 0]]
        d = [2, 1, 1]

    # Check both the dtype=None and explicit-dtype code paths.
    for checker, matrix in ((_check_laplacian_dtype_none, A),
                            (_check_laplacian_dtype, A_copy)):
        checker(
            matrix,
            L,
            d,
            normed=normed,
            use_out_degree=use_out_degree,
            copy=copy,
            dtype=dtype,
            arr_type=arr_type,
        )
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
@pytest.mark.parametrize("fmt", ['csr', 'csc', 'coo', 'lil',
                                 'dok', 'dia', 'bsr'])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("copy", [True, False])
def test_sparse_formats(fmt, normed, copy):
    """The Laplacian can be computed from every sparse matrix format."""
    path_graph = sparse.diags([1, 1], [-1, 1], shape=(4, 4), format=fmt)
    _check_symmetric_graph_laplacian(path_graph, normed, copy)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@pytest.mark.parametrize(
    "arr_type", [np.asarray,
                 sparse.csr_matrix,
                 sparse.coo_matrix,
                 sparse.csr_array,
                 sparse.coo_array]
)
@pytest.mark.parametrize("form", ["array", "function", "lo"])
def test_laplacian_symmetrized(arr_type, form):
    """``symmetrized=True`` must match the Laplacian of ``A + A.T``."""
    n = 3
    mat = arr_type(np.arange(n * n).reshape(n, n))
    L_in, d_in = csgraph.laplacian(
        mat,
        return_diag=True,
        form=form,
    )
    L_out, d_out = csgraph.laplacian(
        mat,
        return_diag=True,
        use_out_degree=True,
        form=form,
    )
    Ls, ds = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        form=form,
    )
    Ls_normed, ds_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        normed=True,
        form=form,
    )
    # Explicitly symmetrize and recompute for comparison.
    mat += mat.T
    Lss, dss = csgraph.laplacian(mat, return_diag=True, form=form)
    Lss_normed, dss_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=True,
        form=form,
    )

    # Symmetrized degrees are the sum of in- and out-degrees.
    assert_allclose(ds, d_in + d_out)
    assert_allclose(ds, dss)
    assert_allclose(ds_normed, dss_normed)

    d = {}
    for L in ["L_in", "L_out", "Ls", "Ls_normed", "Lss", "Lss_normed"]:
        if form == "array":
            d[L] = eval(L)
        else:
            # 'function'/'lo' forms: materialize by applying to the identity.
            d[L] = eval(L)(np.eye(n, dtype=mat.dtype))

    _assert_allclose_sparse(d["Ls"], d["L_in"] + d["L_out"].T)
    _assert_allclose_sparse(d["Ls"], d["Lss"])
    _assert_allclose_sparse(d["Ls_normed"], d["Lss_normed"])
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
@pytest.mark.parametrize(
    "arr_type", [np.asarray,
                 sparse.csr_matrix,
                 sparse.coo_matrix,
                 sparse.csr_array,
                 sparse.coo_array]
)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("symmetrized", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
@pytest.mark.parametrize("form", ["function", "lo"])
def test_format(dtype, arr_type, normed, symmetrized, use_out_degree, form):
    """'function' and 'lo' forms must agree with the default and 'array'."""
    n = 3
    mat = arr_type(np.array([[0, 1, 0], [4, 2, 0], [0, 0, 0]]), dtype=dtype)
    common_kwargs = dict(
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
    )
    # Default form vs explicit 'array' form: must agree exactly.
    Lo, do = csgraph.laplacian(mat, **common_kwargs)
    La, da = csgraph.laplacian(mat, form="array", **common_kwargs)
    assert_allclose(do, da)
    _assert_allclose_sparse(Lo, La)

    L, d = csgraph.laplacian(mat, form=form, **common_kwargs)
    assert_allclose(d, do)
    assert d.dtype == dtype
    # Materialize the operator/function by applying it to the identity.
    Lm = L(np.eye(n, dtype=mat.dtype)).astype(dtype)
    _assert_allclose_sparse(Lm, Lo, rtol=2e-7, atol=2e-7)
    x = np.arange(6).reshape(3, 2)
    if not (normed and dtype in INT_DTYPES):
        assert_allclose(L(x), Lo @ x)
    else:
        # The normalized Lo is cast back to the integer dtype while the
        # callable L is not, so the products need not match.
        pass
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
def test_format_error_message():
    """An unknown ``form`` value must be reported with a clear message."""
    with pytest.raises(ValueError, match="Invalid form: 'toto'"):
        _ = csgraph.laplacian(np.eye(1), form='toto')
|
.venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_matching.py
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import product
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import assert_array_equal, assert_equal
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from scipy.sparse import csr_matrix, coo_matrix, diags
|
| 8 |
+
from scipy.sparse.csgraph import (
|
| 9 |
+
maximum_bipartite_matching, min_weight_full_bipartite_matching
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def test_maximum_bipartite_matching_raises_on_dense_input():
    """Dense (non-sparse) inputs are rejected with TypeError."""
    with pytest.raises(TypeError):
        dense = np.array([[0, 1], [0, 0]])
        maximum_bipartite_matching(dense)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def test_maximum_bipartite_matching_empty_graph():
    """A 0x0 graph yields empty matchings for both permutation types."""
    graph = csr_matrix((0, 0))
    for perm_type in ('row', 'column'):
        matching = maximum_bipartite_matching(graph, perm_type=perm_type)
        assert_array_equal(np.array([]), matching)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def test_maximum_bipartite_matching_empty_left_partition():
    """With no columns, every row is unmatched (-1) in the column matching."""
    graph = csr_matrix((2, 0))
    by_row = maximum_bipartite_matching(graph, perm_type='row')
    by_col = maximum_bipartite_matching(graph, perm_type='column')
    assert_array_equal(np.array([]), by_row)
    assert_array_equal(np.array([-1, -1]), by_col)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def test_maximum_bipartite_matching_empty_right_partition():
    """With no rows, every column is unmatched (-1) in the row matching."""
    graph = csr_matrix((0, 3))
    by_row = maximum_bipartite_matching(graph, perm_type='row')
    by_col = maximum_bipartite_matching(graph, perm_type='column')
    assert_array_equal(np.array([-1, -1, -1]), by_row)
    assert_array_equal(np.array([]), by_col)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def test_maximum_bipartite_matching_graph_with_no_edges():
    """An edgeless 2x2 graph leaves every vertex unmatched."""
    graph = csr_matrix((2, 2))
    for perm_type in ('row', 'column'):
        matching = maximum_bipartite_matching(graph, perm_type=perm_type)
        assert_array_equal(np.array([-1, -1]), matching)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def test_maximum_bipartite_matching_graph_that_causes_augmentation():
    # Column 1 is initially assigned to row 1 and must later be reassigned
    # (via an augmenting path) to make room for row 2.
    graph = csr_matrix([[1, 1], [1, 0]])
    expected_matching = np.array([1, 0])
    assert_array_equal(expected_matching,
                       maximum_bipartite_matching(graph, perm_type='column'))
    assert_array_equal(expected_matching,
                       maximum_bipartite_matching(graph, perm_type='row'))
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def test_maximum_bipartite_matching_graph_with_more_rows_than_columns():
    """One row must stay unmatched when rows outnumber columns."""
    graph = csr_matrix([[1, 1], [1, 0], [0, 1]])
    by_col = maximum_bipartite_matching(graph, perm_type='column')
    by_row = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, -1, 1]), by_col)
    assert_array_equal(np.array([0, 2]), by_row)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def test_maximum_bipartite_matching_graph_with_more_columns_than_rows():
    """One column must stay unmatched when columns outnumber rows."""
    graph = csr_matrix([[1, 1, 0], [0, 0, 1]])
    by_col = maximum_bipartite_matching(graph, perm_type='column')
    by_row = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, 2]), by_col)
    assert_array_equal(np.array([0, -1, 1]), by_row)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def test_maximum_bipartite_matching_explicit_zeros_count_as_edges():
    # Explicitly stored zeros are structural entries, hence valid edges.
    data = [0, 0]
    indices = [1, 0]
    indptr = [0, 1, 2]
    graph = csr_matrix((data, indices, indptr), shape=(2, 2))
    expected_matching = np.array([1, 0])
    assert_array_equal(expected_matching,
                       maximum_bipartite_matching(graph, perm_type='row'))
    assert_array_equal(expected_matching,
                       maximum_bipartite_matching(graph, perm_type='column'))
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def test_maximum_bipartite_matching_feasibility_of_result():
    # Regression test for GitHub issue #11458
    data = np.ones(50, dtype=int)
    indices = [11, 12, 19, 22, 23, 5, 22, 3, 8, 10, 5, 6, 11, 12, 13, 5, 13,
               14, 20, 22, 3, 15, 3, 13, 14, 11, 12, 19, 22, 23, 5, 22, 3, 8,
               10, 5, 6, 11, 12, 13, 5, 13, 14, 20, 22, 3, 15, 3, 13, 14]
    indptr = [0, 5, 7, 10, 10, 15, 20, 22, 22, 23, 25, 30, 32, 35, 35, 40, 45,
              47, 47, 48, 50]
    graph = csr_matrix((data, indices, indptr), shape=(20, 25))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    assert (x != -1).sum() == 13
    assert (y != -1).sum() == 13
    # Every matched pair must correspond to an actual edge of the graph.
    for u, v in enumerate(y):
        if v != -1:
            assert graph[u, v]
    for v, u in enumerate(x):
        if u != -1:
            assert graph[u, v]
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def test_matching_large_random_graph_with_one_edge_incident_to_each_vertex():
    """Recover the permutations of a randomly shuffled identity matrix."""
    np.random.seed(42)
    A = diags(np.ones(25), offsets=0, format='csr')
    rand_perm = np.random.permutation(25)
    rand_perm2 = np.random.permutation(25)

    # Random row and column permutation matrices.
    Rmat = coo_matrix((np.ones(25, dtype=int),
                       (np.arange(25), rand_perm))).tocsr()
    Cmat = coo_matrix((np.ones(25, dtype=int),
                       (rand_perm2, np.arange(25)))).tocsr()
    # Randomly permute identity matrix.
    B = Rmat * A * Cmat

    # Row permute.
    perm = maximum_bipartite_matching(B, perm_type='row')
    Rmat = coo_matrix((np.ones(25, dtype=int),
                       (np.arange(25), perm))).tocsr()
    C1 = Rmat * B

    # Column permute.
    perm2 = maximum_bipartite_matching(B, perm_type='column')
    Cmat = coo_matrix((np.ones(25, dtype=int),
                       (perm2, np.arange(25)))).tocsr()
    C2 = B * Cmat

    # Both products should be the identity: no zero on either diagonal.
    assert_equal(any(C1.diagonal() == 0), False)
    assert_equal(any(C2.diagonal() == 0), False)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)])
def test_min_weight_full_matching_trivial_graph(num_rows, num_cols):
    """Degenerate (empty-side) graphs produce empty matchings."""
    biadjacency = csr_matrix((num_cols, num_rows))
    row_ind, col_ind = min_weight_full_bipartite_matching(biadjacency)
    assert len(row_ind) == 0
    assert len(col_ind) == 0
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
@pytest.mark.parametrize('biadjacency_matrix',
                         [
                             [[1, 1, 1], [1, 0, 0], [1, 0, 0]],
                             [[1, 1, 1], [0, 0, 1], [0, 0, 1]],
                             [[1, 0, 0, 1], [1, 1, 0, 1], [0, 0, 0, 0]],
                             [[1, 0, 0], [2, 0, 0]],
                             [[0, 1, 0], [0, 2, 0]],
                             [[1, 0], [2, 0], [5, 0]]
                         ])
def test_min_weight_full_matching_infeasible_problems(biadjacency_matrix):
    """Graphs admitting no full matching must raise ValueError."""
    with pytest.raises(ValueError):
        min_weight_full_bipartite_matching(csr_matrix(biadjacency_matrix))
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def test_min_weight_full_matching_large_infeasible():
    # Regression test for GitHub issue #17269
    n_cols = 22

    def _sparse_row(entries):
        # Build one dense row of length n_cols from (column, value) pairs.
        row = [0.0] * n_cols
        for col, val in entries:
            row[col] = val
        return row

    # Rows 0-8 each carry a single tiny entry on a shifted diagonal
    # (columns 13..21); the remaining rows carry the original values.
    rows = [_sparse_row([(13 + i, 0.001)]) for i in range(9)]
    rows += [
        _sparse_row([(1, 0.11687445), (4, 0.01319788),
                     (5, 0.07509257), (9, 0.74228317)]),
        _sparse_row([(3, 0.81087935)]),
        _sparse_row([(4, 0.8408466), (9, 0.01194389)]),
        _sparse_row([(1, 0.82994211), (5, 0.11468516), (9, 0.11173505)]),
        _sparse_row([(0, 0.18796507), (2, 0.04002318), (8, 0.75883335)]),
        _sparse_row([(2, 0.71545464), (8, 0.02748488)]),
        _sparse_row([(0, 0.78470564), (8, 0.14829198)]),
        _sparse_row([(1, 0.10870609), (5, 0.8918677), (9, 0.06306644)]),
        _sparse_row([(12, 0.63844085)]),
        _sparse_row([(6, 0.7442354)]),
        _sparse_row([(7, 0.09850549), (10, 0.18638258), (11, 0.2769244)]),
        _sparse_row([(7, 0.73182464), (10, 0.46443561), (11, 0.38589284)]),
        _sparse_row([(0, 0.29510278), (8, 0.09666032)]),
    ]
    a = np.asarray(rows)
    with pytest.raises(ValueError, match='no full matching exists'):
        min_weight_full_bipartite_matching(csr_matrix(a))
|
| 227 |
+
|
| 228 |
+
def test_explicit_zero_causes_warning():
|
| 229 |
+
with pytest.warns(UserWarning):
|
| 230 |
+
biadjacency_matrix = csr_matrix(((2, 0, 3), (0, 1, 1), (0, 2, 3)))
|
| 231 |
+
min_weight_full_bipartite_matching(biadjacency_matrix)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
# General test for linear sum assignment solvers to make it possible to rely
|
| 235 |
+
# on the same tests for scipy.optimize.linear_sum_assignment.
|
| 236 |
+
def linear_sum_assignment_assertions(
|
| 237 |
+
solver, array_type, sign, test_case
|
| 238 |
+
):
|
| 239 |
+
cost_matrix, expected_cost = test_case
|
| 240 |
+
maximize = sign == -1
|
| 241 |
+
cost_matrix = sign * array_type(cost_matrix)
|
| 242 |
+
expected_cost = sign * np.array(expected_cost)
|
| 243 |
+
|
| 244 |
+
row_ind, col_ind = solver(cost_matrix, maximize=maximize)
|
| 245 |
+
assert_array_equal(row_ind, np.sort(row_ind))
|
| 246 |
+
assert_array_equal(expected_cost,
|
| 247 |
+
np.array(cost_matrix[row_ind, col_ind]).flatten())
|
| 248 |
+
|
| 249 |
+
cost_matrix = cost_matrix.T
|
| 250 |
+
row_ind, col_ind = solver(cost_matrix, maximize=maximize)
|
| 251 |
+
assert_array_equal(row_ind, np.sort(row_ind))
|
| 252 |
+
assert_array_equal(np.sort(expected_cost),
|
| 253 |
+
np.sort(np.array(
|
| 254 |
+
cost_matrix[row_ind, col_ind])).flatten())
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
linear_sum_assignment_test_cases = product(
|
| 258 |
+
[-1, 1],
|
| 259 |
+
[
|
| 260 |
+
# Square
|
| 261 |
+
([[400, 150, 400],
|
| 262 |
+
[400, 450, 600],
|
| 263 |
+
[300, 225, 300]],
|
| 264 |
+
[150, 400, 300]),
|
| 265 |
+
|
| 266 |
+
# Rectangular variant
|
| 267 |
+
([[400, 150, 400, 1],
|
| 268 |
+
[400, 450, 600, 2],
|
| 269 |
+
[300, 225, 300, 3]],
|
| 270 |
+
[150, 2, 300]),
|
| 271 |
+
|
| 272 |
+
([[10, 10, 8],
|
| 273 |
+
[9, 8, 1],
|
| 274 |
+
[9, 7, 4]],
|
| 275 |
+
[10, 1, 7]),
|
| 276 |
+
|
| 277 |
+
# Square
|
| 278 |
+
([[10, 10, 8, 11],
|
| 279 |
+
[9, 8, 1, 1],
|
| 280 |
+
[9, 7, 4, 10]],
|
| 281 |
+
[10, 1, 4]),
|
| 282 |
+
|
| 283 |
+
# Rectangular variant
|
| 284 |
+
([[10, float("inf"), float("inf")],
|
| 285 |
+
[float("inf"), float("inf"), 1],
|
| 286 |
+
[float("inf"), 7, float("inf")]],
|
| 287 |
+
[10, 1, 7])
|
| 288 |
+
])
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
|
| 292 |
+
def test_min_weight_full_matching_small_inputs(sign, test_case):
|
| 293 |
+
linear_sum_assignment_assertions(
|
| 294 |
+
min_weight_full_bipartite_matching, csr_matrix, sign, test_case)
|
.venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_pydata_sparse.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import scipy.sparse as sp
|
| 5 |
+
import scipy.sparse.csgraph as spgraph
|
| 6 |
+
|
| 7 |
+
from numpy.testing import assert_equal
|
| 8 |
+
|
| 9 |
+
try:
|
| 10 |
+
import sparse
|
| 11 |
+
except Exception:
|
| 12 |
+
sparse = None
|
| 13 |
+
|
| 14 |
+
pytestmark = pytest.mark.skipif(sparse is None,
|
| 15 |
+
reason="pydata/sparse not installed")
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
msg = "pydata/sparse (0.15.1) does not implement necessary operations"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
sparse_params = (pytest.param("COO"),
|
| 22 |
+
pytest.param("DOK", marks=[pytest.mark.xfail(reason=msg)]))
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@pytest.fixture(params=sparse_params)
|
| 26 |
+
def sparse_cls(request):
|
| 27 |
+
return getattr(sparse, request.param)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@pytest.fixture
|
| 31 |
+
def graphs(sparse_cls):
|
| 32 |
+
graph = [
|
| 33 |
+
[0, 1, 1, 0, 0],
|
| 34 |
+
[0, 0, 1, 0, 0],
|
| 35 |
+
[0, 0, 0, 0, 0],
|
| 36 |
+
[0, 0, 0, 0, 1],
|
| 37 |
+
[0, 0, 0, 0, 0],
|
| 38 |
+
]
|
| 39 |
+
A_dense = np.array(graph)
|
| 40 |
+
A_sparse = sparse_cls(A_dense)
|
| 41 |
+
return A_dense, A_sparse
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@pytest.mark.parametrize(
|
| 45 |
+
"func",
|
| 46 |
+
[
|
| 47 |
+
spgraph.shortest_path,
|
| 48 |
+
spgraph.dijkstra,
|
| 49 |
+
spgraph.floyd_warshall,
|
| 50 |
+
spgraph.bellman_ford,
|
| 51 |
+
spgraph.johnson,
|
| 52 |
+
spgraph.reverse_cuthill_mckee,
|
| 53 |
+
spgraph.maximum_bipartite_matching,
|
| 54 |
+
spgraph.structural_rank,
|
| 55 |
+
]
|
| 56 |
+
)
|
| 57 |
+
def test_csgraph_equiv(func, graphs):
|
| 58 |
+
A_dense, A_sparse = graphs
|
| 59 |
+
actual = func(A_sparse)
|
| 60 |
+
desired = func(sp.csc_matrix(A_dense))
|
| 61 |
+
assert_equal(actual, desired)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def test_connected_components(graphs):
|
| 65 |
+
A_dense, A_sparse = graphs
|
| 66 |
+
func = spgraph.connected_components
|
| 67 |
+
|
| 68 |
+
actual_comp, actual_labels = func(A_sparse)
|
| 69 |
+
desired_comp, desired_labels, = func(sp.csc_matrix(A_dense))
|
| 70 |
+
|
| 71 |
+
assert actual_comp == desired_comp
|
| 72 |
+
assert_equal(actual_labels, desired_labels)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def test_laplacian(graphs):
|
| 76 |
+
A_dense, A_sparse = graphs
|
| 77 |
+
sparse_cls = type(A_sparse)
|
| 78 |
+
func = spgraph.laplacian
|
| 79 |
+
|
| 80 |
+
actual = func(A_sparse)
|
| 81 |
+
desired = func(sp.csc_matrix(A_dense))
|
| 82 |
+
|
| 83 |
+
assert isinstance(actual, sparse_cls)
|
| 84 |
+
|
| 85 |
+
assert_equal(actual.todense(), desired.todense())
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
@pytest.mark.parametrize(
|
| 89 |
+
"func", [spgraph.breadth_first_order, spgraph.depth_first_order]
|
| 90 |
+
)
|
| 91 |
+
def test_order_search(graphs, func):
|
| 92 |
+
A_dense, A_sparse = graphs
|
| 93 |
+
|
| 94 |
+
actual = func(A_sparse, 0)
|
| 95 |
+
desired = func(sp.csc_matrix(A_dense), 0)
|
| 96 |
+
|
| 97 |
+
assert_equal(actual, desired)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
@pytest.mark.parametrize(
|
| 101 |
+
"func", [spgraph.breadth_first_tree, spgraph.depth_first_tree]
|
| 102 |
+
)
|
| 103 |
+
def test_tree_search(graphs, func):
|
| 104 |
+
A_dense, A_sparse = graphs
|
| 105 |
+
sparse_cls = type(A_sparse)
|
| 106 |
+
|
| 107 |
+
actual = func(A_sparse, 0)
|
| 108 |
+
desired = func(sp.csc_matrix(A_dense), 0)
|
| 109 |
+
|
| 110 |
+
assert isinstance(actual, sparse_cls)
|
| 111 |
+
|
| 112 |
+
assert_equal(actual.todense(), desired.todense())
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def test_minimum_spanning_tree(graphs):
|
| 116 |
+
A_dense, A_sparse = graphs
|
| 117 |
+
sparse_cls = type(A_sparse)
|
| 118 |
+
func = spgraph.minimum_spanning_tree
|
| 119 |
+
|
| 120 |
+
actual = func(A_sparse)
|
| 121 |
+
desired = func(sp.csc_matrix(A_dense))
|
| 122 |
+
|
| 123 |
+
assert isinstance(actual, sparse_cls)
|
| 124 |
+
|
| 125 |
+
assert_equal(actual.todense(), desired.todense())
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def test_maximum_flow(graphs):
|
| 129 |
+
A_dense, A_sparse = graphs
|
| 130 |
+
sparse_cls = type(A_sparse)
|
| 131 |
+
func = spgraph.maximum_flow
|
| 132 |
+
|
| 133 |
+
actual = func(A_sparse, 0, 2)
|
| 134 |
+
desired = func(sp.csr_matrix(A_dense), 0, 2)
|
| 135 |
+
|
| 136 |
+
assert actual.flow_value == desired.flow_value
|
| 137 |
+
assert isinstance(actual.flow, sparse_cls)
|
| 138 |
+
|
| 139 |
+
assert_equal(actual.flow.todense(), desired.flow.todense())
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def test_min_weight_full_bipartite_matching(graphs):
|
| 143 |
+
A_dense, A_sparse = graphs
|
| 144 |
+
func = spgraph.min_weight_full_bipartite_matching
|
| 145 |
+
|
| 146 |
+
actual = func(A_sparse[0:2, 1:3])
|
| 147 |
+
desired = func(sp.csc_matrix(A_dense)[0:2, 1:3])
|
| 148 |
+
|
| 149 |
+
assert_equal(actual, desired)
|
.venv/Lib/site-packages/scipy/sparse/csgraph/tests/test_reordering.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import assert_equal
|
| 3 |
+
from scipy.sparse.csgraph import reverse_cuthill_mckee, structural_rank
|
| 4 |
+
from scipy.sparse import csc_matrix, csr_matrix, coo_matrix
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_graph_reverse_cuthill_mckee():
|
| 8 |
+
A = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
|
| 9 |
+
[0, 1, 1, 0, 0, 1, 0, 1],
|
| 10 |
+
[0, 1, 1, 0, 1, 0, 0, 0],
|
| 11 |
+
[0, 0, 0, 1, 0, 0, 1, 0],
|
| 12 |
+
[1, 0, 1, 0, 1, 0, 0, 0],
|
| 13 |
+
[0, 1, 0, 0, 0, 1, 0, 1],
|
| 14 |
+
[0, 0, 0, 1, 0, 0, 1, 0],
|
| 15 |
+
[0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)
|
| 16 |
+
|
| 17 |
+
graph = csr_matrix(A)
|
| 18 |
+
perm = reverse_cuthill_mckee(graph)
|
| 19 |
+
correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0])
|
| 20 |
+
assert_equal(perm, correct_perm)
|
| 21 |
+
|
| 22 |
+
# Test int64 indices input
|
| 23 |
+
graph.indices = graph.indices.astype('int64')
|
| 24 |
+
graph.indptr = graph.indptr.astype('int64')
|
| 25 |
+
perm = reverse_cuthill_mckee(graph, True)
|
| 26 |
+
assert_equal(perm, correct_perm)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def test_graph_reverse_cuthill_mckee_ordering():
|
| 30 |
+
data = np.ones(63,dtype=int)
|
| 31 |
+
rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
|
| 32 |
+
2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
|
| 33 |
+
6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
|
| 34 |
+
9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
|
| 35 |
+
12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
|
| 36 |
+
14, 15, 15, 15, 15, 15])
|
| 37 |
+
cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
|
| 38 |
+
7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
|
| 39 |
+
15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
|
| 40 |
+
1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
|
| 41 |
+
4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
|
| 42 |
+
5, 7, 10, 13, 15])
|
| 43 |
+
graph = coo_matrix((data, (rows,cols))).tocsr()
|
| 44 |
+
perm = reverse_cuthill_mckee(graph)
|
| 45 |
+
correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15,
|
| 46 |
+
0, 13, 7, 5, 9, 11, 1, 3])
|
| 47 |
+
assert_equal(perm, correct_perm)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def test_graph_structural_rank():
|
| 51 |
+
# Test square matrix #1
|
| 52 |
+
A = csc_matrix([[1, 1, 0],
|
| 53 |
+
[1, 0, 1],
|
| 54 |
+
[0, 1, 0]])
|
| 55 |
+
assert_equal(structural_rank(A), 3)
|
| 56 |
+
|
| 57 |
+
# Test square matrix #2
|
| 58 |
+
rows = np.array([0,0,0,0,0,1,1,2,2,3,3,3,3,3,3,4,4,5,5,6,6,7,7])
|
| 59 |
+
cols = np.array([0,1,2,3,4,2,5,2,6,0,1,3,5,6,7,4,5,5,6,2,6,2,4])
|
| 60 |
+
data = np.ones_like(rows)
|
| 61 |
+
B = coo_matrix((data,(rows,cols)), shape=(8,8))
|
| 62 |
+
assert_equal(structural_rank(B), 6)
|
| 63 |
+
|
| 64 |
+
#Test non-square matrix
|
| 65 |
+
C = csc_matrix([[1, 0, 2, 0],
|
| 66 |
+
[2, 0, 4, 0]])
|
| 67 |
+
assert_equal(structural_rank(C), 2)
|
| 68 |
+
|
| 69 |
+
#Test tall matrix
|
| 70 |
+
assert_equal(structural_rank(C.T), 2)
|
.venv/Lib/site-packages/scipy/sparse/linalg/__init__.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Sparse linear algebra (:mod:`scipy.sparse.linalg`)
|
| 3 |
+
==================================================
|
| 4 |
+
|
| 5 |
+
.. currentmodule:: scipy.sparse.linalg
|
| 6 |
+
|
| 7 |
+
Abstract linear operators
|
| 8 |
+
-------------------------
|
| 9 |
+
|
| 10 |
+
.. autosummary::
|
| 11 |
+
:toctree: generated/
|
| 12 |
+
|
| 13 |
+
LinearOperator -- abstract representation of a linear operator
|
| 14 |
+
aslinearoperator -- convert an object to an abstract linear operator
|
| 15 |
+
|
| 16 |
+
Matrix Operations
|
| 17 |
+
-----------------
|
| 18 |
+
|
| 19 |
+
.. autosummary::
|
| 20 |
+
:toctree: generated/
|
| 21 |
+
|
| 22 |
+
inv -- compute the sparse matrix inverse
|
| 23 |
+
expm -- compute the sparse matrix exponential
|
| 24 |
+
expm_multiply -- compute the product of a matrix exponential and a matrix
|
| 25 |
+
matrix_power -- compute the matrix power by raising a matrix to an exponent
|
| 26 |
+
|
| 27 |
+
Matrix norms
|
| 28 |
+
------------
|
| 29 |
+
|
| 30 |
+
.. autosummary::
|
| 31 |
+
:toctree: generated/
|
| 32 |
+
|
| 33 |
+
norm -- Norm of a sparse matrix
|
| 34 |
+
onenormest -- Estimate the 1-norm of a sparse matrix
|
| 35 |
+
|
| 36 |
+
Solving linear problems
|
| 37 |
+
-----------------------
|
| 38 |
+
|
| 39 |
+
Direct methods for linear equation systems:
|
| 40 |
+
|
| 41 |
+
.. autosummary::
|
| 42 |
+
:toctree: generated/
|
| 43 |
+
|
| 44 |
+
spsolve -- Solve the sparse linear system Ax=b
|
| 45 |
+
spsolve_triangular -- Solve sparse linear system Ax=b for a triangular A.
|
| 46 |
+
factorized -- Pre-factorize matrix to a function solving a linear system
|
| 47 |
+
MatrixRankWarning -- Warning on exactly singular matrices
|
| 48 |
+
use_solver -- Select direct solver to use
|
| 49 |
+
|
| 50 |
+
Iterative methods for linear equation systems:
|
| 51 |
+
|
| 52 |
+
.. autosummary::
|
| 53 |
+
:toctree: generated/
|
| 54 |
+
|
| 55 |
+
bicg -- Use BIConjugate Gradient iteration to solve Ax = b
|
| 56 |
+
bicgstab -- Use BIConjugate Gradient STABilized iteration to solve Ax = b
|
| 57 |
+
cg -- Use Conjugate Gradient iteration to solve Ax = b
|
| 58 |
+
cgs -- Use Conjugate Gradient Squared iteration to solve Ax = b
|
| 59 |
+
gmres -- Use Generalized Minimal RESidual iteration to solve Ax = b
|
| 60 |
+
lgmres -- Solve a matrix equation using the LGMRES algorithm
|
| 61 |
+
minres -- Use MINimum RESidual iteration to solve Ax = b
|
| 62 |
+
qmr -- Use Quasi-Minimal Residual iteration to solve Ax = b
|
| 63 |
+
gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm
|
| 64 |
+
tfqmr -- Use Transpose-Free Quasi-Minimal Residual iteration to solve Ax = b
|
| 65 |
+
|
| 66 |
+
Iterative methods for least-squares problems:
|
| 67 |
+
|
| 68 |
+
.. autosummary::
|
| 69 |
+
:toctree: generated/
|
| 70 |
+
|
| 71 |
+
lsqr -- Find the least-squares solution to a sparse linear equation system
|
| 72 |
+
lsmr -- Find the least-squares solution to a sparse linear equation system
|
| 73 |
+
|
| 74 |
+
Matrix factorizations
|
| 75 |
+
---------------------
|
| 76 |
+
|
| 77 |
+
Eigenvalue problems:
|
| 78 |
+
|
| 79 |
+
.. autosummary::
|
| 80 |
+
:toctree: generated/
|
| 81 |
+
|
| 82 |
+
eigs -- Find k eigenvalues and eigenvectors of the square matrix A
|
| 83 |
+
eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
|
| 84 |
+
lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
|
| 85 |
+
|
| 86 |
+
Singular values problems:
|
| 87 |
+
|
| 88 |
+
.. autosummary::
|
| 89 |
+
:toctree: generated/
|
| 90 |
+
|
| 91 |
+
svds -- Compute k singular values/vectors for a sparse matrix
|
| 92 |
+
|
| 93 |
+
The `svds` function supports the following solvers:
|
| 94 |
+
|
| 95 |
+
.. toctree::
|
| 96 |
+
|
| 97 |
+
sparse.linalg.svds-arpack
|
| 98 |
+
sparse.linalg.svds-lobpcg
|
| 99 |
+
sparse.linalg.svds-propack
|
| 100 |
+
|
| 101 |
+
Complete or incomplete LU factorizations
|
| 102 |
+
|
| 103 |
+
.. autosummary::
|
| 104 |
+
:toctree: generated/
|
| 105 |
+
|
| 106 |
+
splu -- Compute a LU decomposition for a sparse matrix
|
| 107 |
+
spilu -- Compute an incomplete LU decomposition for a sparse matrix
|
| 108 |
+
SuperLU -- Object representing an LU factorization
|
| 109 |
+
|
| 110 |
+
Sparse arrays with structure
|
| 111 |
+
----------------------------
|
| 112 |
+
|
| 113 |
+
.. autosummary::
|
| 114 |
+
:toctree: generated/
|
| 115 |
+
|
| 116 |
+
LaplacianNd -- Laplacian on a uniform rectangular grid in ``N`` dimensions
|
| 117 |
+
|
| 118 |
+
Exceptions
|
| 119 |
+
----------
|
| 120 |
+
|
| 121 |
+
.. autosummary::
|
| 122 |
+
:toctree: generated/
|
| 123 |
+
|
| 124 |
+
ArpackNoConvergence
|
| 125 |
+
ArpackError
|
| 126 |
+
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
from ._isolve import *
|
| 130 |
+
from ._dsolve import *
|
| 131 |
+
from ._interface import *
|
| 132 |
+
from ._eigen import *
|
| 133 |
+
from ._matfuncs import *
|
| 134 |
+
from ._onenormest import *
|
| 135 |
+
from ._norm import *
|
| 136 |
+
from ._expm_multiply import *
|
| 137 |
+
from ._special_sparse_arrays import *
|
| 138 |
+
|
| 139 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
| 140 |
+
from . import isolve, dsolve, interface, eigen, matfuncs
|
| 141 |
+
|
| 142 |
+
__all__ = [s for s in dir() if not s.startswith('_')]
|
| 143 |
+
|
| 144 |
+
from scipy._lib._testutils import PytestTester
|
| 145 |
+
test = PytestTester(__name__)
|
| 146 |
+
del PytestTester
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/__init__.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Linear Solvers
|
| 3 |
+
==============
|
| 4 |
+
|
| 5 |
+
The default solver is SuperLU (included in the scipy distribution),
|
| 6 |
+
which can solve real or complex linear systems in both single and
|
| 7 |
+
double precisions. It is automatically replaced by UMFPACK, if
|
| 8 |
+
available. Note that UMFPACK works in double precision only, so
|
| 9 |
+
switch it off by::
|
| 10 |
+
|
| 11 |
+
>>> from scipy.sparse.linalg import spsolve, use_solver
|
| 12 |
+
>>> use_solver(useUmfpack=False)
|
| 13 |
+
|
| 14 |
+
to solve in the single precision. See also use_solver documentation.
|
| 15 |
+
|
| 16 |
+
Example session::
|
| 17 |
+
|
| 18 |
+
>>> from scipy.sparse import csc_matrix, spdiags
|
| 19 |
+
>>> from numpy import array
|
| 20 |
+
>>>
|
| 21 |
+
>>> print("Inverting a sparse linear system:")
|
| 22 |
+
>>> print("The sparse matrix (constructed from diagonals):")
|
| 23 |
+
>>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
|
| 24 |
+
>>> b = array([1, 2, 3, 4, 5])
|
| 25 |
+
>>> print("Solve: single precision complex:")
|
| 26 |
+
>>> use_solver( useUmfpack = False )
|
| 27 |
+
>>> a = a.astype('F')
|
| 28 |
+
>>> x = spsolve(a, b)
|
| 29 |
+
>>> print(x)
|
| 30 |
+
>>> print("Error: ", a@x-b)
|
| 31 |
+
>>>
|
| 32 |
+
>>> print("Solve: double precision complex:")
|
| 33 |
+
>>> use_solver( useUmfpack = True )
|
| 34 |
+
>>> a = a.astype('D')
|
| 35 |
+
>>> x = spsolve(a, b)
|
| 36 |
+
>>> print(x)
|
| 37 |
+
>>> print("Error: ", a@x-b)
|
| 38 |
+
>>>
|
| 39 |
+
>>> print("Solve: double precision:")
|
| 40 |
+
>>> a = a.astype('d')
|
| 41 |
+
>>> x = spsolve(a, b)
|
| 42 |
+
>>> print(x)
|
| 43 |
+
>>> print("Error: ", a@x-b)
|
| 44 |
+
>>>
|
| 45 |
+
>>> print("Solve: single precision:")
|
| 46 |
+
>>> use_solver( useUmfpack = False )
|
| 47 |
+
>>> a = a.astype('f')
|
| 48 |
+
>>> x = spsolve(a, b.astype('f'))
|
| 49 |
+
>>> print(x)
|
| 50 |
+
>>> print("Error: ", a@x-b)
|
| 51 |
+
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
#import umfpack
|
| 55 |
+
#__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) )
|
| 56 |
+
#del umfpack
|
| 57 |
+
|
| 58 |
+
from .linsolve import *
|
| 59 |
+
from ._superlu import SuperLU
|
| 60 |
+
from . import _add_newdocs
|
| 61 |
+
from . import linsolve
|
| 62 |
+
|
| 63 |
+
__all__ = [
|
| 64 |
+
'MatrixRankWarning', 'SuperLU', 'factorized',
|
| 65 |
+
'spilu', 'splu', 'spsolve',
|
| 66 |
+
'spsolve_triangular', 'use_solver'
|
| 67 |
+
]
|
| 68 |
+
|
| 69 |
+
from scipy._lib._testutils import PytestTester
|
| 70 |
+
test = PytestTester(__name__)
|
| 71 |
+
del PytestTester
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (2.11 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-39.pyc
ADDED
|
Binary file (3.7 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-39.pyc
ADDED
|
Binary file (21.4 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numpy.lib import add_newdoc
|
| 2 |
+
|
| 3 |
+
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU',
|
| 4 |
+
"""
|
| 5 |
+
LU factorization of a sparse matrix.
|
| 6 |
+
|
| 7 |
+
Factorization is represented as::
|
| 8 |
+
|
| 9 |
+
Pr @ A @ Pc = L @ U
|
| 10 |
+
|
| 11 |
+
To construct these `SuperLU` objects, call the `splu` and `spilu`
|
| 12 |
+
functions.
|
| 13 |
+
|
| 14 |
+
Attributes
|
| 15 |
+
----------
|
| 16 |
+
shape
|
| 17 |
+
nnz
|
| 18 |
+
perm_c
|
| 19 |
+
perm_r
|
| 20 |
+
L
|
| 21 |
+
U
|
| 22 |
+
|
| 23 |
+
Methods
|
| 24 |
+
-------
|
| 25 |
+
solve
|
| 26 |
+
|
| 27 |
+
Notes
|
| 28 |
+
-----
|
| 29 |
+
|
| 30 |
+
.. versionadded:: 0.14.0
|
| 31 |
+
|
| 32 |
+
Examples
|
| 33 |
+
--------
|
| 34 |
+
The LU decomposition can be used to solve matrix equations. Consider:
|
| 35 |
+
|
| 36 |
+
>>> import numpy as np
|
| 37 |
+
>>> from scipy.sparse import csc_matrix
|
| 38 |
+
>>> from scipy.sparse.linalg import splu
|
| 39 |
+
>>> A = csc_matrix([[1,2,0,4], [1,0,0,1], [1,0,2,1], [2,2,1,0.]])
|
| 40 |
+
|
| 41 |
+
This can be solved for a given right-hand side:
|
| 42 |
+
|
| 43 |
+
>>> lu = splu(A)
|
| 44 |
+
>>> b = np.array([1, 2, 3, 4])
|
| 45 |
+
>>> x = lu.solve(b)
|
| 46 |
+
>>> A.dot(x)
|
| 47 |
+
array([ 1., 2., 3., 4.])
|
| 48 |
+
|
| 49 |
+
The ``lu`` object also contains an explicit representation of the
|
| 50 |
+
decomposition. The permutations are represented as mappings of
|
| 51 |
+
indices:
|
| 52 |
+
|
| 53 |
+
>>> lu.perm_r
|
| 54 |
+
array([2, 1, 3, 0], dtype=int32) # may vary
|
| 55 |
+
>>> lu.perm_c
|
| 56 |
+
array([0, 1, 3, 2], dtype=int32) # may vary
|
| 57 |
+
|
| 58 |
+
The L and U factors are sparse matrices in CSC format:
|
| 59 |
+
|
| 60 |
+
>>> lu.L.toarray()
|
| 61 |
+
array([[ 1. , 0. , 0. , 0. ], # may vary
|
| 62 |
+
[ 0.5, 1. , 0. , 0. ],
|
| 63 |
+
[ 0.5, -1. , 1. , 0. ],
|
| 64 |
+
[ 0.5, 1. , 0. , 1. ]])
|
| 65 |
+
>>> lu.U.toarray()
|
| 66 |
+
array([[ 2. , 2. , 0. , 1. ], # may vary
|
| 67 |
+
[ 0. , -1. , 1. , -0.5],
|
| 68 |
+
[ 0. , 0. , 5. , -1. ],
|
| 69 |
+
[ 0. , 0. , 0. , 2. ]])
|
| 70 |
+
|
| 71 |
+
The permutation matrices can be constructed:
|
| 72 |
+
|
| 73 |
+
>>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4))))
|
| 74 |
+
>>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c)))
|
| 75 |
+
|
| 76 |
+
We can reassemble the original matrix:
|
| 77 |
+
|
| 78 |
+
>>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).toarray()
|
| 79 |
+
array([[ 1., 2., 0., 4.],
|
| 80 |
+
[ 1., 0., 0., 1.],
|
| 81 |
+
[ 1., 0., 2., 1.],
|
| 82 |
+
[ 2., 2., 1., 0.]])
|
| 83 |
+
""")
|
| 84 |
+
|
| 85 |
+
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve',
|
| 86 |
+
"""
|
| 87 |
+
solve(rhs[, trans])
|
| 88 |
+
|
| 89 |
+
Solves linear system of equations with one or several right-hand sides.
|
| 90 |
+
|
| 91 |
+
Parameters
|
| 92 |
+
----------
|
| 93 |
+
rhs : ndarray, shape (n,) or (n, k)
|
| 94 |
+
Right hand side(s) of equation
|
| 95 |
+
trans : {'N', 'T', 'H'}, optional
|
| 96 |
+
Type of system to solve::
|
| 97 |
+
|
| 98 |
+
'N': A @ x == rhs (default)
|
| 99 |
+
'T': A^T @ x == rhs
|
| 100 |
+
'H': A^H @ x == rhs
|
| 101 |
+
|
| 102 |
+
i.e., normal, transposed, and hermitian conjugate.
|
| 103 |
+
|
| 104 |
+
Returns
|
| 105 |
+
-------
|
| 106 |
+
x : ndarray, shape ``rhs.shape``
|
| 107 |
+
Solution vector(s)
|
| 108 |
+
"""))
|
| 109 |
+
|
| 110 |
+
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L',
|
| 111 |
+
"""
|
| 112 |
+
Lower triangular factor with unit diagonal as a
|
| 113 |
+
`scipy.sparse.csc_matrix`.
|
| 114 |
+
|
| 115 |
+
.. versionadded:: 0.14.0
|
| 116 |
+
"""))
|
| 117 |
+
|
| 118 |
+
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U',
|
| 119 |
+
"""
|
| 120 |
+
Upper triangular factor as a `scipy.sparse.csc_matrix`.
|
| 121 |
+
|
| 122 |
+
.. versionadded:: 0.14.0
|
| 123 |
+
"""))
|
| 124 |
+
|
| 125 |
+
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape',
|
| 126 |
+
"""
|
| 127 |
+
Shape of the original matrix as a tuple of ints.
|
| 128 |
+
"""))
|
| 129 |
+
|
| 130 |
+
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz',
|
| 131 |
+
"""
|
| 132 |
+
Number of nonzero elements in the matrix.
|
| 133 |
+
"""))
|
| 134 |
+
|
| 135 |
+
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c',
|
| 136 |
+
"""
|
| 137 |
+
Permutation Pc represented as an array of indices.
|
| 138 |
+
|
| 139 |
+
The column permutation matrix can be reconstructed via:
|
| 140 |
+
|
| 141 |
+
>>> Pc = np.zeros((n, n))
|
| 142 |
+
>>> Pc[np.arange(n), perm_c] = 1
|
| 143 |
+
"""))
|
| 144 |
+
|
| 145 |
+
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r',
|
| 146 |
+
"""
|
| 147 |
+
Permutation Pr represented as an array of indices.
|
| 148 |
+
|
| 149 |
+
The row permutation matrix can be reconstructed via:
|
| 150 |
+
|
| 151 |
+
>>> Pr = np.zeros((n, n))
|
| 152 |
+
>>> Pr[perm_r, np.arange(n)] = 1
|
| 153 |
+
"""))
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cp39-win_amd64.dll.a
ADDED
|
Binary file (1.55 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cp39-win_amd64.pyd
ADDED
|
Binary file (412 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py
ADDED
|
@@ -0,0 +1,746 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from warnings import warn
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy import asarray
|
| 5 |
+
from scipy.sparse import (issparse,
|
| 6 |
+
SparseEfficiencyWarning, csc_matrix, csr_matrix)
|
| 7 |
+
from scipy.sparse._sputils import is_pydata_spmatrix, convert_pydata_sparse_to_scipy
|
| 8 |
+
from scipy.linalg import LinAlgError
|
| 9 |
+
import copy
|
| 10 |
+
|
| 11 |
+
from . import _superlu
|
| 12 |
+
|
| 13 |
+
noScikit = False
|
| 14 |
+
try:
|
| 15 |
+
import scikits.umfpack as umfpack
|
| 16 |
+
except ImportError:
|
| 17 |
+
noScikit = True
|
| 18 |
+
|
| 19 |
+
useUmfpack = not noScikit
|
| 20 |
+
|
| 21 |
+
__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
|
| 22 |
+
'MatrixRankWarning', 'spsolve_triangular']
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class MatrixRankWarning(UserWarning):
    """Warning emitted by ``spsolve`` when SuperLU reports the matrix to be
    exactly singular; the returned solution is then filled with NaN."""
    pass
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def use_solver(**kwargs):
    """
    Select default sparse direct solver to be used.

    Parameters
    ----------
    useUmfpack : bool, optional
        Use UMFPACK [1]_, [2]_, [3]_, [4]_. over SuperLU. Has effect only
        if ``scikits.umfpack`` is installed. Default: True
    assumeSortedIndices : bool, optional
        Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
        Has effect only if useUmfpack is True and ``scikits.umfpack`` is
        installed. Default: False

    Notes
    -----
    The default sparse solver is UMFPACK when available
    (``scikits.umfpack`` is installed). This can be changed by passing
    useUmfpack = False, which then causes the always present SuperLU
    based solver to be used.

    UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If
    sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
    to gain some speed.

    References
    ----------
    .. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
           multifrontal method with a column pre-ordering strategy, ACM
           Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
           https://dl.acm.org/doi/abs/10.1145/992200.992206

    .. [2] T. A. Davis, A column pre-ordering strategy for the
           unsymmetric-pattern multifrontal method, ACM Trans.
           on Mathematical Software, 30(2), 2004, pp. 165--195.
           https://dl.acm.org/doi/abs/10.1145/992200.992205

    .. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
           method for unsymmetric sparse matrices, ACM Trans. on
           Mathematical Software, 25(1), 1999, pp. 1--19.
           https://doi.org/10.1145/305658.287640

    .. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
           method for sparse LU factorization, SIAM J. Matrix Analysis and
           Computations, 18(1), 1997, pp. 140--158.
           https://doi.org/10.1137/S0895479894246905T.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import use_solver, spsolve
    >>> from scipy.sparse import csc_matrix
    >>> R = np.random.randn(5, 5)
    >>> A = csc_matrix(R)
    >>> b = np.random.randn(5)
    >>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK
    >>> x = spsolve(A, b)
    >>> np.allclose(A.dot(x), b)
    True
    >>> use_solver(useUmfpack=True) # reset umfPack usage to default
    """
    if 'useUmfpack' in kwargs:
        # Rebind the module-level flag so subsequent solver calls pick up
        # the requested backend.
        globals()['useUmfpack'] = kwargs['useUmfpack']
    if useUmfpack and 'assumeSortedIndices' in kwargs:
        # Forward the option to scikits.umfpack; only reached when the
        # UMFPACK backend is enabled.
        umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
|
| 94 |
+
|
| 95 |
+
def _get_umf_family(A):
|
| 96 |
+
"""Get umfpack family string given the sparse matrix dtype."""
|
| 97 |
+
_families = {
|
| 98 |
+
(np.float64, np.int32): 'di',
|
| 99 |
+
(np.complex128, np.int32): 'zi',
|
| 100 |
+
(np.float64, np.int64): 'dl',
|
| 101 |
+
(np.complex128, np.int64): 'zl'
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
# A.dtype.name can only be "float64" or
|
| 105 |
+
# "complex128" in control flow
|
| 106 |
+
f_type = getattr(np, A.dtype.name)
|
| 107 |
+
# control flow may allow for more index
|
| 108 |
+
# types to get through here
|
| 109 |
+
i_type = getattr(np, A.indices.dtype.name)
|
| 110 |
+
|
| 111 |
+
try:
|
| 112 |
+
family = _families[(f_type, i_type)]
|
| 113 |
+
|
| 114 |
+
except KeyError as e:
|
| 115 |
+
msg = ('only float64 or complex128 matrices with int32 or int64 '
|
| 116 |
+
f'indices are supported! (got: matrix: {f_type}, indices: {i_type})')
|
| 117 |
+
raise ValueError(msg) from e
|
| 118 |
+
|
| 119 |
+
# See gh-8278. Considered converting only if
|
| 120 |
+
# A.shape[0]*A.shape[1] > np.iinfo(np.int32).max,
|
| 121 |
+
# but that didn't always fix the issue.
|
| 122 |
+
family = family[0] + "l"
|
| 123 |
+
A_new = copy.copy(A)
|
| 124 |
+
A_new.indptr = np.asarray(A.indptr, dtype=np.int64)
|
| 125 |
+
A_new.indices = np.asarray(A.indices, dtype=np.int64)
|
| 126 |
+
|
| 127 |
+
return family, A_new
|
| 128 |
+
|
| 129 |
+
def _safe_downcast_indices(A):
|
| 130 |
+
# check for safe downcasting
|
| 131 |
+
max_value = np.iinfo(np.intc).max
|
| 132 |
+
|
| 133 |
+
if A.indptr[-1] > max_value: # indptr[-1] is max b/c indptr always sorted
|
| 134 |
+
raise ValueError("indptr values too large for SuperLU")
|
| 135 |
+
|
| 136 |
+
if max(*A.shape) > max_value: # only check large enough arrays
|
| 137 |
+
if np.any(A.indices > max_value):
|
| 138 |
+
raise ValueError("indices values too large for SuperLU")
|
| 139 |
+
|
| 140 |
+
indices = A.indices.astype(np.intc, copy=False)
|
| 141 |
+
indptr = A.indptr.astype(np.intc, copy=False)
|
| 142 |
+
return indices, indptr
|
| 143 |
+
|
| 144 |
+
def spsolve(A, b, permc_spec=None, use_umfpack=True):
    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.

    Parameters
    ----------
    A : ndarray or sparse matrix
        The square matrix A will be converted into CSC or CSR form
    b : ndarray or sparse matrix
        The matrix or vector representing the right hand side of the equation.
        If a vector, b.shape must be (n,) or (n, 1).
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_.

    use_umfpack : bool, optional
        if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_,
        [6]_ . This is only referenced if b is a vector and
        ``scikits.umfpack`` is installed.

    Returns
    -------
    x : ndarray or sparse matrix
        the solution of the sparse linear equation.
        If b is a vector, then x is a vector of size A.shape[1]
        If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])

    Notes
    -----
    For solving the matrix expression AX = B, this solver assumes the resulting
    matrix X is sparse, as is often the case for very sparse inputs. If the
    resulting X is dense, the construction of this sparse result will be
    relatively expensive. In that case, consider converting A to a dense
    matrix and using scipy.linalg.solve or its variants.

    References
    ----------
    .. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836:
           COLAMD, an approximate column minimum degree ordering algorithm,
           ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380.
           :doi:`10.1145/1024074.1024080`

    .. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate
           minimum degree ordering algorithm, ACM Trans. on Mathematical
           Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079`

    .. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
           multifrontal method with a column pre-ordering strategy, ACM
           Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
           https://dl.acm.org/doi/abs/10.1145/992200.992206

    .. [4] T. A. Davis, A column pre-ordering strategy for the
           unsymmetric-pattern multifrontal method, ACM Trans.
           on Mathematical Software, 30(2), 2004, pp. 165--195.
           https://dl.acm.org/doi/abs/10.1145/992200.992205

    .. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
           method for unsymmetric sparse matrices, ACM Trans. on
           Mathematical Software, 25(1), 1999, pp. 1--19.
           https://doi.org/10.1145/305658.287640

    .. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
           method for sparse LU factorization, SIAM J. Matrix Analysis and
           Computations, 18(1), 1997, pp. 140--158.
           https://doi.org/10.1137/S0895479894246905T.


    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import spsolve
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve(A, B)
    >>> np.allclose(A.dot(x).toarray(), B.toarray())
    True
    """
    # Remember whether b came in as a pydata/sparse object so the result can
    # be converted back to that class at the end.
    is_pydata_sparse = is_pydata_spmatrix(b)
    pydata_sparse_cls = b.__class__ if is_pydata_sparse else None
    A = convert_pydata_sparse_to_scipy(A)
    b = convert_pydata_sparse_to_scipy(b)

    if not (issparse(A) and A.format in ("csc", "csr")):
        A = csc_matrix(A)
        warn('spsolve requires A be CSC or CSR matrix format',
             SparseEfficiencyWarning, stacklevel=2)

    # b is a vector only if b have shape (n,) or (n, 1)
    b_is_sparse = issparse(b)
    if not b_is_sparse:
        b = asarray(b)
    b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))

    # sum duplicates for non-canonical format
    A.sum_duplicates()
    A = A._asfptype()  # upcast to a floating point format
    # Promote both operands to a common dtype so the backend sees matching
    # scalar types.
    result_dtype = np.promote_types(A.dtype, b.dtype)
    if A.dtype != result_dtype:
        A = A.astype(result_dtype)
    if b.dtype != result_dtype:
        b = b.astype(result_dtype)

    # validate input shapes
    M, N = A.shape
    if (M != N):
        raise ValueError(f"matrix must be square (has shape {(M, N)})")

    if M != b.shape[0]:
        raise ValueError(f"matrix - rhs dimension mismatch ({A.shape} - {b.shape[0]})")

    # Honor both the per-call flag and the module-level default.
    use_umfpack = use_umfpack and useUmfpack

    if b_is_vector and use_umfpack:
        # UMFPACK path: only used for vector right-hand sides.
        if b_is_sparse:
            b_vec = b.toarray()
        else:
            b_vec = b
        b_vec = asarray(b_vec, dtype=A.dtype).ravel()

        if noScikit:
            raise RuntimeError('Scikits.umfpack not installed.')

        if A.dtype.char not in 'dD':
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack = False")

        umf_family, A = _get_umf_family(A)
        umf = umfpack.UmfpackContext(umf_family)
        x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
                         autoTranspose=True)
    else:
        if b_is_vector and b_is_sparse:
            # A dense vector RHS is cheap; densify so SuperLU can be used.
            b = b.toarray()
            b_is_sparse = False

        if not b_is_sparse:
            # flag tells the SuperLU wrapper which storage layout A uses.
            if A.format == "csc":
                flag = 1  # CSC format
            else:
                flag = 0  # CSR format

            indices = A.indices.astype(np.intc, copy=False)
            indptr = A.indptr.astype(np.intc, copy=False)
            options = dict(ColPerm=permc_spec)
            x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr,
                                    b, flag, options=options)
            if info != 0:
                # Nonzero info from gssv signals exact singularity; return
                # NaNs rather than raising.
                warn("Matrix is exactly singular", MatrixRankWarning, stacklevel=2)
                x.fill(np.nan)
            if b_is_vector:
                x = x.ravel()
        else:
            # b is sparse
            Afactsolve = factorized(A)

            if not (b.format == "csc" or is_pydata_spmatrix(b)):
                warn('spsolve is more efficient when sparse b '
                     'is in the CSC matrix format',
                     SparseEfficiencyWarning, stacklevel=2)
                b = csc_matrix(b)

            # Create a sparse output matrix by repeatedly applying
            # the sparse factorization to solve columns of b.
            data_segs = []
            row_segs = []
            col_segs = []
            for j in range(b.shape[1]):
                # TODO: replace this with
                # bj = b[:, j].toarray().ravel()
                # once 1D sparse arrays are supported.
                # That is a slightly faster code path.
                bj = b[:, [j]].toarray().ravel()
                xj = Afactsolve(bj)
                # Keep only the nonzero entries of the column solution.
                w = np.flatnonzero(xj)
                segment_length = w.shape[0]
                row_segs.append(w)
                col_segs.append(np.full(segment_length, j, dtype=int))
                data_segs.append(np.asarray(xj[w], dtype=A.dtype))
            sparse_data = np.concatenate(data_segs)
            sparse_row = np.concatenate(row_segs)
            sparse_col = np.concatenate(col_segs)
            # Build the result in the same sparse class as A (CSC or CSR).
            x = A.__class__((sparse_data, (sparse_row, sparse_col)),
                            shape=b.shape, dtype=A.dtype)

    if is_pydata_sparse:
        x = pydata_sparse_cls.from_scipy_sparse(x)

    return x
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def splu(A, permc_spec=None, diag_pivot_thresh=None,
         relax=None, panel_size=None, options=dict()):
    """
    Compute the LU decomposition of a sparse, square matrix.

    Parameters
    ----------
    A : sparse matrix
        Sparse matrix to factorize. Most efficient when provided in CSC
        format. Other formats will be converted to CSC before factorization.
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering

    diag_pivot_thresh : float, optional
        Threshold used for a diagonal entry to be an acceptable pivot.
        See SuperLU user's guide for details [1]_
    relax : int, optional
        Expert option for customizing the degree of relaxing supernodes.
        See SuperLU user's guide for details [1]_
    panel_size : int, optional
        Expert option for customizing the panel size.
        See SuperLU user's guide for details [1]_
    options : dict, optional
        Dictionary containing additional expert options to SuperLU.
        See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
        for more details. For example, you can specify
        ``options=dict(Equil=False, IterRefine='SINGLE'))``
        to turn equilibration off and perform a single iterative refinement.

    Returns
    -------
    invA : scipy.sparse.linalg.SuperLU
        Object, which has a ``solve`` method.

    See also
    --------
    spilu : incomplete LU decomposition

    Notes
    -----
    This function uses the SuperLU library.

    References
    ----------
    .. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import splu
    >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
    >>> B = splu(A)
    >>> x = np.array([1., 2., 3.], dtype=float)
    >>> B.solve(x)
    array([ 1. , -3. , -1.5])
    >>> A.dot(B.solve(x))
    array([ 1.,  2.,  3.])
    >>> B.solve(A.dot(x))
    array([ 1.,  2.,  3.])
    """

    if is_pydata_spmatrix(A):
        # Build the factorization's CSC results in the caller's sparse class.
        def csc_construct_func(*a, cls=type(A)):
            return cls.from_scipy_sparse(csc_matrix(*a))
        A = A.to_scipy_sparse().tocsc()
    else:
        csc_construct_func = csc_matrix

    if not (issparse(A) and A.format == "csc"):
        A = csc_matrix(A)
        warn('splu converted its input to CSC format',
             SparseEfficiencyWarning, stacklevel=2)

    # sum duplicates for non-canonical format
    A.sum_duplicates()
    A = A._asfptype()  # upcast to a floating point format

    M, N = A.shape
    if (M != N):
        raise ValueError("can only factor square matrices")  # is this true?

    # SuperLU requires indices representable as C int.
    indices, indptr = _safe_downcast_indices(A)

    # NOTE: entries in ``options`` override the individual keyword arguments.
    _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                    PanelSize=panel_size, Relax=relax)
    if options is not None:
        _options.update(options)

    # Ensure that no column permutations are applied
    if (_options["ColPerm"] == "NATURAL"):
        _options["SymmetricMode"] = True

    return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
                          csc_construct_func=csc_construct_func,
                          ilu=False, options=_options)
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
          diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
    """
    Compute an incomplete LU decomposition for a sparse, square matrix.

    The resulting object is an approximation to the inverse of `A`.

    Parameters
    ----------
    A : (N, N) array_like
        Sparse matrix to factorize. Most efficient when provided in CSC format.
        Other formats will be converted to CSC before factorization.
    drop_tol : float, optional
        Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
        (default: 1e-4)
    fill_factor : float, optional
        Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
    drop_rule : str, optional
        Comma-separated string of drop rules to use.
        Available rules: ``basic``, ``prows``, ``column``, ``area``,
        ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)

        See SuperLU documentation for details.

    Remaining other options
        Same as for `splu`

    Returns
    -------
    invA_approx : scipy.sparse.linalg.SuperLU
        Object, which has a ``solve`` method.

    See also
    --------
    splu : complete LU decomposition

    Notes
    -----
    To improve the better approximation to the inverse, you may need to
    increase `fill_factor` AND decrease `drop_tol`.

    This function uses the SuperLU library.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import spilu
    >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
    >>> B = spilu(A)
    >>> x = np.array([1., 2., 3.], dtype=float)
    >>> B.solve(x)
    array([ 1. , -3. , -1.5])
    >>> A.dot(B.solve(x))
    array([ 1.,  2.,  3.])
    >>> B.solve(A.dot(x))
    array([ 1.,  2.,  3.])
    """

    if is_pydata_spmatrix(A):
        # Build the factorization's CSC results in the caller's sparse class.
        def csc_construct_func(*a, cls=type(A)):
            return cls.from_scipy_sparse(csc_matrix(*a))
        A = A.to_scipy_sparse().tocsc()
    else:
        csc_construct_func = csc_matrix

    if not (issparse(A) and A.format == "csc"):
        A = csc_matrix(A)
        warn('spilu converted its input to CSC format',
             SparseEfficiencyWarning, stacklevel=2)

    # sum duplicates for non-canonical format
    A.sum_duplicates()
    A = A._asfptype()  # upcast to a floating point format

    M, N = A.shape
    if (M != N):
        raise ValueError("can only factor square matrices")  # is this true?

    # SuperLU requires indices representable as C int.
    indices, indptr = _safe_downcast_indices(A)

    # NOTE: entries in ``options`` override the individual keyword arguments.
    _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
                    ILU_FillFactor=fill_factor,
                    DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                    PanelSize=panel_size, Relax=relax)
    if options is not None:
        _options.update(options)

    # Ensure that no column permutations are applied
    if (_options["ColPerm"] == "NATURAL"):
        _options["SymmetricMode"] = True

    return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
                          csc_construct_func=csc_construct_func,
                          ilu=True, options=_options)
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
def factorized(A):
    """
    Return a function for solving a sparse linear system, with A pre-factorized.

    Parameters
    ----------
    A : (N, N) array_like
        Input. A in CSC format is most efficient. A CSR format matrix will
        be converted to CSC before factorization.

    Returns
    -------
    solve : callable
        To solve the linear system of equations given in `A`, the `solve`
        callable should be passed an ndarray of shape (N,).

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import factorized
    >>> from scipy.sparse import csc_matrix
    >>> A = np.array([[ 3. ,  2. , -1. ],
    ...               [ 2. , -2. ,  4. ],
    ...               [-1. ,  0.5, -1. ]])
    >>> solve = factorized(csc_matrix(A)) # Makes LU decomposition.
    >>> rhs1 = np.array([1, -2, 0])
    >>> solve(rhs1) # Uses the LU factors.
    array([ 1., -2., -2.])

    """
    if is_pydata_spmatrix(A):
        A = A.to_scipy_sparse().tocsc()

    if useUmfpack:
        # UMFPACK backend: factor once, return a closure over the context.
        if noScikit:
            raise RuntimeError('Scikits.umfpack not installed.')

        if not (issparse(A) and A.format == "csc"):
            A = csc_matrix(A)
            warn('splu converted its input to CSC format',
                 SparseEfficiencyWarning, stacklevel=2)

        A = A._asfptype()  # upcast to a floating point format

        # UMFPACK only supports double-precision (real or complex) data.
        if A.dtype.char not in 'dD':
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack = False")

        umf_family, A = _get_umf_family(A)
        umf = umfpack.UmfpackContext(umf_family)

        # Make LU decomposition.
        umf.numeric(A)

        def solve(b):
            # ``umf`` and ``A`` are captured by the closure and kept alive
            # for the lifetime of the returned callable.
            with np.errstate(divide="ignore", invalid="ignore"):
                # Ignoring warnings with numpy >= 1.23.0, see gh-16523
                result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)

            return result

        return solve
    else:
        # SuperLU backend: the SuperLU object's bound solve method.
        return splu(A).solve
|
| 604 |
+
|
| 605 |
+
|
| 606 |
+
def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
                       unit_diagonal=False):
    """
    Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix.

    Parameters
    ----------
    A : (M, M) sparse matrix
        A sparse square triangular matrix. Should be in CSR format.
    b : (M,) or (M, N) array_like
        Right-hand side matrix in ``A x = b``
    lower : bool, optional
        Whether `A` is a lower or upper triangular matrix.
        Default is lower triangular matrix.
    overwrite_A : bool, optional
        Allow changing `A`. The indices of `A` are going to be sorted and zero
        entries are going to be removed.
        Enabling gives a performance gain. Default is False.
    overwrite_b : bool, optional
        Allow overwriting data in `b`.
        Enabling gives a performance gain. Default is False.
        If `overwrite_b` is True, it should be ensured that
        `b` has an appropriate dtype to be able to store the result.
    unit_diagonal : bool, optional
        If True, diagonal elements of `a` are assumed to be 1 and will not be
        referenced.

        .. versionadded:: 1.4.0

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system ``A x = b``. Shape of return matches shape
        of `b`.

    Raises
    ------
    LinAlgError
        If `A` is singular or not triangular.
    ValueError
        If shape of `A` or shape of `b` do not match the requirements.

    Notes
    -----
    .. versionadded:: 0.19.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csr_matrix
    >>> from scipy.sparse.linalg import spsolve_triangular
    >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
    >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve_triangular(A, B)
    >>> np.allclose(A.dot(x), B)
    True
    """

    if is_pydata_spmatrix(A):
        A = A.to_scipy_sparse().tocsr()

    # Check the input for correct type and format.
    if not (issparse(A) and A.format == "csr"):
        warn('CSR matrix format is required. Converting to CSR matrix.',
             SparseEfficiencyWarning, stacklevel=2)
        A = csr_matrix(A)
    elif not overwrite_A:
        # The canonicalization below mutates A, so work on a copy unless
        # the caller opted in.
        A = A.copy()

    if A.shape[0] != A.shape[1]:
        raise ValueError(
            f'A must be a square matrix but its shape is {A.shape}.')

    # sum duplicates for non-canonical format
    A.sum_duplicates()

    b = np.asanyarray(b)

    if b.ndim not in [1, 2]:
        raise ValueError(
            f'b must have 1 or 2 dims but its shape is {b.shape}.')
    if A.shape[0] != b.shape[0]:
        raise ValueError(
            'The size of the dimensions of A must be equal to '
            'the size of the first dimension of b but the shape of A is '
            f'{A.shape} and the shape of b is {b.shape}.'
        )

    # Init x as (a copy of) b.
    x_dtype = np.result_type(A.data, b, np.float64)
    if overwrite_b:
        if np.can_cast(b.dtype, x_dtype, casting='same_kind'):
            x = b
        else:
            raise ValueError(
                f'Cannot overwrite b (dtype {b.dtype}) with result '
                f'of type {x_dtype}.'
            )
    else:
        x = b.astype(x_dtype, copy=True)

    # Choose forward or backward order.
    if lower:
        row_indices = range(len(b))
    else:
        row_indices = range(len(b) - 1, -1, -1)

    # Fill x iteratively.
    for i in row_indices:

        # Get indices for i-th row.
        indptr_start = A.indptr[i]
        indptr_stop = A.indptr[i + 1]

        # After sum_duplicates() the column indices of each row are sorted,
        # so for a lower-triangular row the diagonal is the last stored
        # entry, and for an upper-triangular row it is the first.
        if lower:
            A_diagonal_index_row_i = indptr_stop - 1
            A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1)
        else:
            A_diagonal_index_row_i = indptr_start
            A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop)

        # Check regularity and triangularity of A.
        if not unit_diagonal and (indptr_stop <= indptr_start
                                  or A.indices[A_diagonal_index_row_i] < i):
            raise LinAlgError(
                f'A is singular: diagonal {i} is zero.')
        if not unit_diagonal and A.indices[A_diagonal_index_row_i] > i:
            raise LinAlgError(
                'A is not triangular: A[{}, {}] is nonzero.'
                ''.format(i, A.indices[A_diagonal_index_row_i]))

        # Incorporate off-diagonal entries.
        A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i]
        A_values_in_row_i = A.data[A_off_diagonal_indices_row_i]
        x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i)

        # Compute i-th entry of x.
        if not unit_diagonal:
            x[i] /= A.data[A_diagonal_index_row_i]

    return x
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py
ADDED
|
File without changes
|
.venv/Lib/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py
ADDED
|
@@ -0,0 +1,805 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import threading
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy import array, finfo, arange, eye, all, unique, ones, dot
|
| 6 |
+
import numpy.random as random
|
| 7 |
+
from numpy.testing import (
|
| 8 |
+
assert_array_almost_equal, assert_almost_equal,
|
| 9 |
+
assert_equal, assert_array_equal, assert_, assert_allclose,
|
| 10 |
+
assert_warns, suppress_warnings)
|
| 11 |
+
import pytest
|
| 12 |
+
from pytest import raises as assert_raises
|
| 13 |
+
|
| 14 |
+
import scipy.linalg
|
| 15 |
+
from scipy.linalg import norm, inv
|
| 16 |
+
from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
|
| 17 |
+
csr_matrix, identity, issparse, dok_matrix, lil_matrix, bsr_matrix)
|
| 18 |
+
from scipy.sparse.linalg import SuperLU
|
| 19 |
+
from scipy.sparse.linalg._dsolve import (spsolve, use_solver, splu, spilu,
|
| 20 |
+
MatrixRankWarning, _superlu, spsolve_triangular, factorized)
|
| 21 |
+
import scipy.sparse
|
| 22 |
+
|
| 23 |
+
from scipy._lib._testutils import check_free_memory
|
| 24 |
+
from scipy._lib._util import ComplexWarning
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
sup_sparse_efficiency = suppress_warnings()
|
| 28 |
+
sup_sparse_efficiency.filter(SparseEfficiencyWarning)
|
| 29 |
+
|
| 30 |
+
# scikits.umfpack is not a SciPy dependency but it is optionally used in
|
| 31 |
+
# dsolve, so check whether it's available
|
| 32 |
+
try:
|
| 33 |
+
import scikits.umfpack as umfpack
|
| 34 |
+
has_umfpack = True
|
| 35 |
+
except ImportError:
|
| 36 |
+
has_umfpack = False
|
| 37 |
+
|
| 38 |
+
def toarray(a):
    """Return *a* as a dense ndarray, densifying sparse input.

    Dense inputs are returned unchanged (same object); sparse inputs
    are converted via their ``toarray`` method.
    """
    return a.toarray() if issparse(a) else a
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def setup_bug_8278():
    """Build the large 3-D Laplacian system from gh-8278.

    Returns ``(A, b)`` where ``A`` is the sparse Kronecker-sum
    discretization of the 3-D Laplacian on an (N-1)^3 interior grid
    (N = 64) and ``b`` is a random right-hand side drawn from the
    global NumPy RNG.
    """
    N = 2 ** 6
    h = 1 / N
    # 1-D second-difference stencil [-1, 2, -1] / h^2 on the interior points.
    lap_1d = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1],
                                shape=(N - 1, N - 1)) / (h ** 2)
    ident = scipy.sparse.eye(N - 1)
    # Kronecker-sum the 1-D operator along each of the three axes.
    lhs = (scipy.sparse.kron(ident, scipy.sparse.kron(ident, lap_1d))
           + scipy.sparse.kron(ident, scipy.sparse.kron(lap_1d, ident))
           + scipy.sparse.kron(lap_1d, scipy.sparse.kron(ident, ident)))
    rhs = np.random.rand((N - 1) ** 3)
    return lhs, rhs
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class TestFactorized:
    """Tests for ``factorized`` with and without the optional umfpack backend.

    Each umfpack-dependent test is skipped when scikits.umfpack is not
    installed; the non-umfpack variants force the SuperLU code path via
    ``use_solver(useUmfpack=False)``.
    """

    def setup_method(self):
        # Banded 5x5 test matrix in CSC form; RNG reseeded so the
        # random draws in each test are reproducible.
        n = 5
        d = arange(n) + 1
        self.n = n
        self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
        random.seed(1234)

    def _check_singular(self):
        """Factor an all-zero (singular) matrix and apply it to a vector."""
        A = csc_matrix((5,5), dtype='d')
        b = ones(5)
        assert_array_almost_equal(0. * b, factorized(A)(b))

    def _check_non_singular(self):
        """Compare factorized() against splu() on a random nonsingular matrix."""
        # Make a diagonal dominant, to make sure it is not singular
        n = 5
        a = csc_matrix(random.rand(n, n))
        b = ones(n)

        expected = splu(a).solve(b)
        assert_array_almost_equal(factorized(a)(b), expected)

    def test_singular_without_umfpack(self):
        use_solver(useUmfpack=False)
        with assert_raises(RuntimeError, match="Factor is exactly singular"):
            self._check_singular()

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_singular_with_umfpack(self):
        # umfpack warns (rather than raising) on a singular factorization.
        use_solver(useUmfpack=True)
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
            assert_warns(umfpack.UmfpackWarning, self._check_singular)

    def test_non_singular_without_umfpack(self):
        use_solver(useUmfpack=False)
        self._check_non_singular()

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_non_singular_with_umfpack(self):
        use_solver(useUmfpack=True)
        self._check_non_singular()

    def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
        use_solver(useUmfpack=False)
        msg = "can only factor square matrices"
        with assert_raises(ValueError, match=msg):
            factorized(self.A[:, :4])

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_factorizes_nonsquare_matrix_with_umfpack(self):
        # umfpack supports rectangular factorization, unlike SuperLU.
        use_solver(useUmfpack=True)
        # does not raise
        factorized(self.A[:,:4])

    def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
        use_solver(useUmfpack=False)
        solve = factorized(self.A)
        b = random.rand(4)        # wrong length (A is 5x5)
        B = random.rand(4, 3)     # wrong row count
        BB = random.rand(self.n, 3, 9)  # too many dimensions

        with assert_raises(ValueError, match="is of incompatible size"):
            solve(b)
        with assert_raises(ValueError, match="is of incompatible size"):
            solve(B)
        with assert_raises(ValueError,
                           match="object too deep for desired array"):
            solve(BB)

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
        use_solver(useUmfpack=True)
        solve = factorized(self.A)
        b = random.rand(4)
        B = random.rand(4, 3)
        BB = random.rand(self.n, 3, 9)

        # does not raise
        solve(b)
        msg = "object too deep for desired array"
        with assert_raises(ValueError, match=msg):
            solve(B)
        with assert_raises(ValueError, match=msg):
            solve(BB)

    def test_call_with_cast_to_complex_without_umfpack(self):
        # SuperLU solver produced from a real factorization rejects
        # complex right-hand sides outright.
        use_solver(useUmfpack=False)
        solve = factorized(self.A)
        b = random.rand(4)
        for t in [np.complex64, np.complex128]:
            with assert_raises(TypeError, match="Cannot cast array data"):
                solve(b.astype(t))

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_call_with_cast_to_complex_with_umfpack(self):
        # umfpack downcasts complex input to real with a ComplexWarning.
        use_solver(useUmfpack=True)
        solve = factorized(self.A)
        b = random.rand(4)
        for t in [np.complex64, np.complex128]:
            assert_warns(ComplexWarning, solve, b.astype(t))

    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_assume_sorted_indices_flag(self):
        # a sparse matrix with unsorted indices
        unsorted_inds = np.array([2, 0, 1, 0])
        data = np.array([10, 16, 5, 0.4])
        indptr = np.array([0, 1, 2, 4])
        A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
        b = ones(3)

        # should raise when incorrectly assuming indices are sorted
        use_solver(useUmfpack=True, assumeSortedIndices=True)
        with assert_raises(RuntimeError,
                           match="UMFPACK_ERROR_invalid_matrix"):
            factorized(A)

        # should sort indices and succeed when not assuming indices are sorted
        use_solver(useUmfpack=True, assumeSortedIndices=False)
        expected = splu(A.copy()).solve(b)

        assert_equal(A.has_sorted_indices, 0)
        assert_array_almost_equal(factorized(A)(b), expected)

    @pytest.mark.slow
    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_bug_8278(self):
        # Regression test: umfpack on a large 3-D Laplacian (gh-8278).
        check_free_memory(8000)
        use_solver(useUmfpack=True)
        A, b = setup_bug_8278()
        A = A.tocsc()
        f = factorized(A)
        x = f(b)
        assert_array_almost_equal(A @ x, b)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class TestLinsolve:
    """Tests for ``spsolve``: singular input, dtype promotion, shape
    handling, and consistency with dense ``scipy.linalg.solve``.

    ``setup_method`` forces the SuperLU path; individual tests that need
    umfpack re-enable it explicitly.
    """

    def setup_method(self):
        use_solver(useUmfpack=False)

    def test_singular(self):
        # A singular system should warn and return non-finite values
        # rather than raising.
        A = csc_matrix((5,5), dtype='d')
        b = array([1, 2, 3, 4, 5],dtype='d')
        with suppress_warnings() as sup:
            sup.filter(MatrixRankWarning, "Matrix is exactly singular")
            x = spsolve(A, b)
        assert_(not np.isfinite(x).any())

    def test_singular_gh_3312(self):
        # "Bad" test case that leads SuperLU to call LAPACK with invalid
        # arguments. Check that it fails moderately gracefully.
        ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
        v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
        A = csc_matrix((v, ij.T), shape=(20, 20))
        b = np.arange(20)

        try:
            # should either raise a runtime error or return value
            # appropriate for singular input (which yields the warning)
            with suppress_warnings() as sup:
                sup.filter(MatrixRankWarning, "Matrix is exactly singular")
                x = spsolve(A, b)
            assert not np.isfinite(x).any()
        except RuntimeError:
            pass

    @pytest.mark.parametrize('format', ['csc', 'csr'])
    @pytest.mark.parametrize('idx_dtype', [np.int32, np.int64])
    def test_twodiags(self, format: str, idx_dtype: np.dtype):
        # Solve a bidiagonal system in all four value dtypes and both
        # index dtypes; residual bounded by the condition number.
        A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5,
                    format=format)
        b = array([1, 2, 3, 4, 5])

        # condition number of A
        cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2)

        for t in ['f','d','F','D']:
            eps = finfo(t).eps  # floating point epsilon
            b = b.astype(t)
            Asp = A.astype(t)
            Asp.indices = Asp.indices.astype(idx_dtype, copy=False)
            Asp.indptr = Asp.indptr.astype(idx_dtype, copy=False)

            x = spsolve(Asp, b)
            assert_(norm(b - Asp@x) < 10 * cond_A * eps)

    def test_bvector_smoketest(self):
        # Round-trip: b = A@x, recover x with a 1-D right-hand side.
        Adense = array([[0., 1., 1.],
                        [1., 0., 1.],
                        [0., 0., 1.]])
        As = csc_matrix(Adense)
        random.seed(1234)
        x = random.randn(3)
        b = As@x
        x2 = spsolve(As, b)

        assert_array_almost_equal(x, x2)

    def test_bmatrix_smoketest(self):
        # Same round-trip with a sparse matrix right-hand side.
        Adense = array([[0., 1., 1.],
                        [1., 0., 1.],
                        [0., 0., 1.]])
        As = csc_matrix(Adense)
        random.seed(1234)
        x = random.randn(3, 4)
        Bdense = As.dot(x)
        Bs = csc_matrix(Bdense)
        x2 = spsolve(As, Bs)
        assert_array_almost_equal(x, x2.toarray())

    @sup_sparse_efficiency
    def test_non_square(self):
        # A is not square.
        A = ones((3, 4))
        b = ones((4, 1))
        assert_raises(ValueError, spsolve, A, b)
        # A2 and b2 have incompatible shapes.
        A2 = csc_matrix(eye(3))
        b2 = array([1.0, 2.0])
        assert_raises(ValueError, spsolve, A2, b2)

    @sup_sparse_efficiency
    def test_example_comparison(self):
        # spsolve must agree with dense scipy.linalg.solve.
        row = array([0,0,1,2,2,2])
        col = array([0,2,2,0,1,2])
        data = array([1,2,3,-4,5,6])
        sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
        M = sM.toarray()

        row = array([0,0,1,1,0,0])
        col = array([0,2,1,1,0,0])
        data = array([1,1,1,1,1,1])
        sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
        N = sN.toarray()

        sX = spsolve(sM, sN)
        X = scipy.linalg.solve(M, N)

        assert_array_almost_equal(X, sX.toarray())

    @sup_sparse_efficiency
    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_shape_compatibility(self):
        # spsolve should accept b as list, ndarray, or any sparse format,
        # with consistent output type and shape for both backends.
        use_solver(useUmfpack=True)
        A = csc_matrix([[1., 0], [0, 2]])
        bs = [
            [1, 6],
            array([1, 6]),
            [[1], [6]],
            array([[1], [6]]),
            csc_matrix([[1], [6]]),
            csr_matrix([[1], [6]]),
            dok_matrix([[1], [6]]),
            bsr_matrix([[1], [6]]),
            array([[1., 2., 3.], [6., 8., 10.]]),
            csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
            csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
            dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
            bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
        ]

        for b in bs:
            x = np.linalg.solve(A.toarray(), toarray(b))
            for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
                x1 = spsolve(spmattype(A), b, use_umfpack=True)
                x2 = spsolve(spmattype(A), b, use_umfpack=False)

                # check solution
                if x.ndim == 2 and x.shape[1] == 1:
                    # interprets also these as "vectors"
                    x = x.ravel()

                assert_array_almost_equal(toarray(x1), x,
                                          err_msg=repr((b, spmattype, 1)))
                assert_array_almost_equal(toarray(x2), x,
                                          err_msg=repr((b, spmattype, 2)))

                # dense vs. sparse output ("vectors" are always dense)
                if issparse(b) and x.ndim > 1:
                    assert_(issparse(x1), repr((b, spmattype, 1)))
                    assert_(issparse(x2), repr((b, spmattype, 2)))
                else:
                    assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
                    assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))

                # check output shape
                if x.ndim == 1:
                    # "vector"
                    assert_equal(x1.shape, (A.shape[1],))
                    assert_equal(x2.shape, (A.shape[1],))
                else:
                    # "matrix"
                    assert_equal(x1.shape, x.shape)
                    assert_equal(x2.shape, x.shape)

        A = csc_matrix((3, 3))
        b = csc_matrix((1, 3))
        assert_raises(ValueError, spsolve, A, b)

    @sup_sparse_efficiency
    def test_ndarray_support(self):
        # Dense ndarray A is accepted (converted internally).
        A = array([[1., 2.], [2., 0.]])
        x = array([[1., 1.], [0.5, -0.5]])
        b = array([[2., 0.], [2., 2.]])

        assert_array_almost_equal(x, spsolve(A, b))

    def test_gssv_badinput(self):
        # The low-level _superlu.gssv wrapper must reject malformed
        # CSR/CSC component arrays instead of crashing.
        N = 10
        d = arange(N) + 1.0
        A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)

        for spmatrix in (csc_matrix, csr_matrix):
            A = spmatrix(A)
            b = np.arange(N)

            def not_c_contig(x):
                return x.repeat(2)[::2]

            def not_1dim(x):
                return x[:,None]

            def bad_type(x):
                return x.astype(bool)

            def too_short(x):
                return x[:-1]

            badops = [not_c_contig, not_1dim, bad_type, too_short]

            for badop in badops:
                msg = f"{spmatrix!r} {badop!r}"
                # Not C-contiguous
                assert_raises((ValueError, TypeError), _superlu.gssv,
                              N, A.nnz, badop(A.data), A.indices, A.indptr,
                              b, int(spmatrix == csc_matrix), err_msg=msg)
                assert_raises((ValueError, TypeError), _superlu.gssv,
                              N, A.nnz, A.data, badop(A.indices), A.indptr,
                              b, int(spmatrix == csc_matrix), err_msg=msg)
                assert_raises((ValueError, TypeError), _superlu.gssv,
                              N, A.nnz, A.data, A.indices, badop(A.indptr),
                              b, int(spmatrix == csc_matrix), err_msg=msg)

    def test_sparsity_preservation(self):
        # Solving with the identity keeps the sparsity of b.
        ident = csc_matrix([
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1]])
        b = csc_matrix([
            [0, 1],
            [1, 0],
            [0, 0]])
        x = spsolve(ident, b)
        assert_equal(ident.nnz, 3)
        assert_equal(b.nnz, 2)
        assert_equal(x.nnz, 2)
        assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)

    def test_dtype_cast(self):
        # Result dtype is complex iff either A or b is complex.
        A_real = scipy.sparse.csr_matrix([[1, 2, 0],
                                          [0, 0, 3],
                                          [4, 0, 5]])
        A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
                                             [0, 0, 3],
                                             [4, 0, 5 + 1j]])
        b_real = np.array([1,1,1])
        b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
        x = spsolve(A_real, b_real)
        assert_(np.issubdtype(x.dtype, np.floating))
        x = spsolve(A_real, b_complex)
        assert_(np.issubdtype(x.dtype, np.complexfloating))
        x = spsolve(A_complex, b_real)
        assert_(np.issubdtype(x.dtype, np.complexfloating))
        x = spsolve(A_complex, b_complex)
        assert_(np.issubdtype(x.dtype, np.complexfloating))

    @pytest.mark.slow
    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
    def test_bug_8278(self):
        # Regression test: spsolve via umfpack on a large system (gh-8278).
        check_free_memory(8000)
        use_solver(useUmfpack=True)
        A, b = setup_bug_8278()
        x = spsolve(A, b)
        assert_array_almost_equal(A @ x, b)
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
class TestSplu:
    """Tests for the SuperLU factorization front-ends ``splu`` and ``spilu``.

    Covers dtype/index-dtype combinations, transpose/Hermitian solves,
    permutation vectors, refcounting of the SuperLU object, and
    thread-safety of concurrent factorizations.
    """

    def setup_method(self):
        use_solver(useUmfpack=False)
        # Banded 40x40 CSC test matrix shared by most tests.
        n = 40
        d = arange(n) + 1
        self.n = n
        self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n, format='csc')
        random.seed(1234)

    def _smoketest(self, spxlu, check, dtype, idx_dtype):
        """Factor self.A with *spxlu* and verify solves for several
        right-hand-side shapes and the 'T'/'H' transpose modes via *check*."""
        if np.issubdtype(dtype, np.complexfloating):
            A = self.A + 1j*self.A.T
        else:
            A = self.A

        A = A.astype(dtype)
        A.indices = A.indices.astype(idx_dtype, copy=False)
        A.indptr = A.indptr.astype(idx_dtype, copy=False)
        lu = spxlu(A)

        rng = random.RandomState(1234)

        # Input shapes
        for k in [None, 1, 2, self.n, self.n+2]:
            msg = f"k={k!r}"

            if k is None:
                b = rng.rand(self.n)
            else:
                b = rng.rand(self.n, k)

            if np.issubdtype(dtype, np.complexfloating):
                b = b + 1j*rng.rand(*b.shape)
            b = b.astype(dtype)

            x = lu.solve(b)
            check(A, b, x, msg)

            x = lu.solve(b, 'T')
            check(A.T, b, x, msg)

            x = lu.solve(b, 'H')
            check(A.T.conj(), b, x, msg)

    @sup_sparse_efficiency
    def test_splu_smoketest(self):
        self._internal_test_splu_smoketest()

    def _internal_test_splu_smoketest(self):
        # Check that splu works at all
        def check(A, b, x, msg=""):
            eps = np.finfo(A.dtype).eps
            r = A @ x
            assert_(abs(r - b).max() < 1e3*eps, msg)

        for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
            for idx_dtype in [np.int32, np.int64]:
                self._smoketest(splu, check, dtype, idx_dtype)

    @sup_sparse_efficiency
    def test_spilu_smoketest(self):
        self._internal_test_spilu_smoketest()

    def _internal_test_spilu_smoketest(self):
        # spilu is incomplete: residuals should be small but, unlike splu,
        # measurably nonzero (checked at the end).
        errors = []

        def check(A, b, x, msg=""):
            r = A @ x
            err = abs(r - b).max()
            assert_(err < 1e-2, msg)
            if b.dtype in (np.float64, np.complex128):
                errors.append(err)

        for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
            for idx_dtype in [np.int32, np.int64]:
                self._smoketest(spilu, check, dtype, idx_dtype)

        assert_(max(errors) > 1e-5)

    @sup_sparse_efficiency
    def test_spilu_drop_rule(self):
        # Test passing in the drop_rule argument to spilu.
        A = identity(2)

        rules = [
            b'basic,area'.decode('ascii'),  # unicode
            b'basic,area',  # ascii
            [b'basic', b'area'.decode('ascii')]
        ]
        for rule in rules:
            # Argument should be accepted
            assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))

    def test_splu_nnz0(self):
        # An all-zero matrix cannot be factorized.
        A = csc_matrix((5,5), dtype='d')
        assert_raises(RuntimeError, splu, A)

    def test_spilu_nnz0(self):
        A = csc_matrix((5,5), dtype='d')
        assert_raises(RuntimeError, spilu, A)

    def test_splu_basic(self):
        # Test basic splu functionality.
        n = 30
        rng = random.RandomState(12)
        a = rng.rand(n, n)
        a[a < 0.95] = 0
        # First test with a singular matrix
        a[:, 0] = 0
        a_ = csc_matrix(a)
        # Matrix is exactly singular
        assert_raises(RuntimeError, splu, a_)

        # Make a diagonal dominant, to make sure it is not singular
        a += 4*eye(n)
        a_ = csc_matrix(a)
        lu = splu(a_)
        b = ones(n)
        x = lu.solve(b)
        assert_almost_equal(dot(a, x), b)

    def test_splu_perm(self):
        # Test the permutation vectors exposed by splu.
        n = 30
        a = random.random((n, n))
        a[a < 0.95] = 0
        # Make a diagonal dominant, to make sure it is not singular
        a += 4*eye(n)
        a_ = csc_matrix(a)
        lu = splu(a_)
        # Check that the permutation indices do belong to [0, n-1].
        for perm in (lu.perm_r, lu.perm_c):
            assert_(all(perm > -1))
            assert_(all(perm < n))
            assert_equal(len(unique(perm)), len(perm))

        # Now make a symmetric, and test that the two permutation vectors are
        # the same
        # Note: a += a.T relies on undefined behavior.
        a = a + a.T
        a_ = csc_matrix(a)
        lu = splu(a_)
        assert_array_equal(lu.perm_r, lu.perm_c)

    @pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)])
    def test_natural_permc(self, splu_fun, rtol):
        # Test that the "NATURAL" permc_spec does not permute the matrix
        np.random.seed(42)
        n = 500
        p = 0.01
        A = scipy.sparse.random(n, n, p)
        x = np.random.rand(n)
        # Make A diagonal dominant to make sure it is not singular
        A += (n+1)*scipy.sparse.identity(n)
        A_ = csc_matrix(A)
        b = A_ @ x

        # without permc_spec, permutation is not identity
        lu = splu_fun(A_)
        assert_(np.any(lu.perm_c != np.arange(n)))

        # with permc_spec="NATURAL", permutation is identity
        lu = splu_fun(A_, permc_spec="NATURAL")
        assert_array_equal(lu.perm_c, np.arange(n))

        # Also, lu decomposition is valid
        x2 = lu.solve(b)
        assert_allclose(x, x2, rtol=rtol)

    @pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
    def test_lu_refcount(self):
        # Test that we are keeping track of the reference count with splu.
        n = 30
        a = random.random((n, n))
        a[a < 0.95] = 0
        # Make a diagonal dominant, to make sure it is not singular
        a += 4*eye(n)
        a_ = csc_matrix(a)
        lu = splu(a_)

        # And now test that we don't have a refcount bug
        rc = sys.getrefcount(lu)
        for attr in ('perm_r', 'perm_c'):
            # Accessing perm_r/perm_c must hold exactly one extra reference
            # to the SuperLU object, released when the array is dropped.
            perm = getattr(lu, attr)
            assert_equal(sys.getrefcount(lu), rc + 1)
            del perm
            assert_equal(sys.getrefcount(lu), rc)

    def test_bad_inputs(self):
        A = self.A.tocsc()

        # Non-square matrices are rejected.
        assert_raises(ValueError, splu, A[:,:4])
        assert_raises(ValueError, spilu, A[:,:4])

        for lu in [splu(A), spilu(A)]:
            # Wrong-size and over-deep right-hand sides, and complex b
            # against a real factorization.
            b = random.rand(42)
            B = random.rand(42, 3)
            BB = random.rand(self.n, 3, 9)
            assert_raises(ValueError, lu.solve, b)
            assert_raises(ValueError, lu.solve, B)
            assert_raises(ValueError, lu.solve, BB)
            assert_raises(TypeError, lu.solve,
                          b.astype(np.complex64))
            assert_raises(TypeError, lu.solve,
                          b.astype(np.complex128))

    @sup_sparse_efficiency
    def test_superlu_dlamch_i386_nan(self):
        # SuperLU 4.3 calls some functions returning floats without
        # declaring them. On i386@linux call convention, this fails to
        # clear floating point registers after call. As a result, NaN
        # can appear in the next floating point operation made.
        #
        # Here's a test case that triggered the issue.
        n = 8
        d = np.arange(n) + 1
        A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
        A = A.astype(np.float32)
        spilu(A)
        A = A + 1j*A
        B = A.A
        assert_(not np.isnan(B).any())

    @sup_sparse_efficiency
    def test_lu_attr(self):
        """Verify Pr @ A @ Pc == L @ U for the exposed L/U/perm attributes."""

        def check(dtype, complex_2=False):
            A = self.A.astype(dtype)

            if complex_2:
                A = A + 1j*A.T

            n = A.shape[0]
            lu = splu(A)

            # Check that the decomposition is as advertised

            Pc = np.zeros((n, n))
            Pc[np.arange(n), lu.perm_c] = 1

            Pr = np.zeros((n, n))
            Pr[lu.perm_r, np.arange(n)] = 1

            Ad = A.toarray()
            lhs = Pr.dot(Ad).dot(Pc)
            rhs = (lu.L @ lu.U).toarray()

            eps = np.finfo(dtype).eps

            assert_allclose(lhs, rhs, atol=100*eps)

        check(np.float32)
        check(np.float64)
        check(np.complex64)
        check(np.complex128)
        check(np.complex64, True)
        check(np.complex128, True)

    @pytest.mark.slow
    @sup_sparse_efficiency
    def test_threads_parallel(self):
        # Run the smoke tests concurrently from 20 threads; all must succeed
        # (a worker that raises does not append to `oks`).
        oks = []

        def worker():
            try:
                self.test_splu_basic()
                self._internal_test_splu_smoketest()
                self._internal_test_spilu_smoketest()
                oks.append(True)
            except Exception:
                pass

        threads = [threading.Thread(target=worker)
                   for k in range(20)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        assert_equal(len(oks), 20)
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
class TestSpsolveTriangular:
|
| 727 |
+
def setup_method(self):
|
| 728 |
+
use_solver(useUmfpack=False)
|
| 729 |
+
|
| 730 |
+
def test_zero_diagonal(self):
|
| 731 |
+
n = 5
|
| 732 |
+
rng = np.random.default_rng(43876432987)
|
| 733 |
+
A = rng.standard_normal((n, n))
|
| 734 |
+
b = np.arange(n)
|
| 735 |
+
A = scipy.sparse.tril(A, k=0, format='csr')
|
| 736 |
+
|
| 737 |
+
x = spsolve_triangular(A, b, unit_diagonal=True, lower=True)
|
| 738 |
+
|
| 739 |
+
A.setdiag(1)
|
| 740 |
+
assert_allclose(A.dot(x), b)
|
| 741 |
+
|
| 742 |
+
# Regression test from gh-15199
|
| 743 |
+
A = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64)
|
| 744 |
+
b = np.array([1., 2., 3.])
|
| 745 |
+
with suppress_warnings() as sup:
|
| 746 |
+
sup.filter(SparseEfficiencyWarning, "CSR matrix format is")
|
| 747 |
+
spsolve_triangular(A, b, unit_diagonal=True)
|
| 748 |
+
|
| 749 |
+
def test_singular(self):
|
| 750 |
+
n = 5
|
| 751 |
+
A = csr_matrix((n, n))
|
| 752 |
+
b = np.arange(n)
|
| 753 |
+
for lower in (True, False):
|
| 754 |
+
assert_raises(scipy.linalg.LinAlgError,
|
| 755 |
+
spsolve_triangular, A, b, lower=lower)
|
| 756 |
+
|
| 757 |
+
@sup_sparse_efficiency
|
| 758 |
+
def test_bad_shape(self):
|
| 759 |
+
# A is not square.
|
| 760 |
+
A = np.zeros((3, 4))
|
| 761 |
+
b = ones((4, 1))
|
| 762 |
+
assert_raises(ValueError, spsolve_triangular, A, b)
|
| 763 |
+
# A2 and b2 have incompatible shapes.
|
| 764 |
+
A2 = csr_matrix(eye(3))
|
| 765 |
+
b2 = array([1.0, 2.0])
|
| 766 |
+
assert_raises(ValueError, spsolve_triangular, A2, b2)
|
| 767 |
+
|
| 768 |
+
@sup_sparse_efficiency
|
| 769 |
+
def test_input_types(self):
|
| 770 |
+
A = array([[1., 0.], [1., 2.]])
|
| 771 |
+
b = array([[2., 0.], [2., 2.]])
|
| 772 |
+
for matrix_type in (array, csc_matrix, csr_matrix):
|
| 773 |
+
x = spsolve_triangular(matrix_type(A), b, lower=True)
|
| 774 |
+
assert_array_almost_equal(A.dot(x), b)
|
| 775 |
+
|
| 776 |
+
@pytest.mark.slow
|
| 777 |
+
@pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job
|
| 778 |
+
@sup_sparse_efficiency
|
| 779 |
+
def test_random(self):
|
| 780 |
+
def random_triangle_matrix(n, lower=True):
|
| 781 |
+
A = scipy.sparse.random(n, n, density=0.1, format='coo')
|
| 782 |
+
if lower:
|
| 783 |
+
A = scipy.sparse.tril(A)
|
| 784 |
+
else:
|
| 785 |
+
A = scipy.sparse.triu(A)
|
| 786 |
+
A = A.tocsr(copy=False)
|
| 787 |
+
for i in range(n):
|
| 788 |
+
A[i, i] = np.random.rand() + 1
|
| 789 |
+
return A
|
| 790 |
+
|
| 791 |
+
np.random.seed(1234)
|
| 792 |
+
for lower in (True, False):
|
| 793 |
+
for n in (10, 10**2, 10**3):
|
| 794 |
+
A = random_triangle_matrix(n, lower=lower)
|
| 795 |
+
for m in (1, 10):
|
| 796 |
+
for b in (np.random.rand(n, m),
|
| 797 |
+
np.random.randint(-9, 9, (n, m)),
|
| 798 |
+
np.random.randint(-9, 9, (n, m)) +
|
| 799 |
+
np.random.randint(-9, 9, (n, m)) * 1j):
|
| 800 |
+
x = spsolve_triangular(A, b, lower=lower)
|
| 801 |
+
assert_array_almost_equal(A.dot(x), b)
|
| 802 |
+
x = spsolve_triangular(A, b, lower=lower,
|
| 803 |
+
unit_diagonal=True)
|
| 804 |
+
A.setdiag(1)
|
| 805 |
+
assert_array_almost_equal(A.dot(x), b)
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/__init__.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Sparse Eigenvalue Solvers
|
| 3 |
+
-------------------------
|
| 4 |
+
|
| 5 |
+
The submodules of sparse.linalg._eigen:
|
| 6 |
+
1. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
from .arpack import *
|
| 10 |
+
from .lobpcg import *
|
| 11 |
+
from ._svds import svds
|
| 12 |
+
|
| 13 |
+
from . import arpack
|
| 14 |
+
|
| 15 |
+
__all__ = [
|
| 16 |
+
'ArpackError', 'ArpackNoConvergence',
|
| 17 |
+
'eigs', 'eigsh', 'lobpcg', 'svds'
|
| 18 |
+
]
|
| 19 |
+
|
| 20 |
+
from scipy._lib._testutils import PytestTester
|
| 21 |
+
test = PytestTester(__name__)
|
| 22 |
+
del PytestTester
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (657 Bytes). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-39.pyc
ADDED
|
Binary file (17.3 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/_svds.py
ADDED
|
@@ -0,0 +1,545 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from .arpack import _arpack # type: ignore[attr-defined]
|
| 4 |
+
from . import eigsh
|
| 5 |
+
|
| 6 |
+
from scipy._lib._util import check_random_state
|
| 7 |
+
from scipy.sparse.linalg._interface import LinearOperator, aslinearoperator
|
| 8 |
+
from scipy.sparse.linalg._eigen.lobpcg import lobpcg # type: ignore[no-redef]
|
| 9 |
+
from scipy.sparse.linalg._svdp import _svdp
|
| 10 |
+
from scipy.linalg import svd
|
| 11 |
+
|
| 12 |
+
arpack_int = _arpack.timing.nbx.dtype
|
| 13 |
+
__all__ = ['svds']
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _herm(x):
|
| 17 |
+
return x.T.conj()
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _iv(A, k, ncv, tol, which, v0, maxiter,
|
| 21 |
+
return_singular, solver, random_state):
|
| 22 |
+
|
| 23 |
+
# input validation/standardization for `solver`
|
| 24 |
+
# out of order because it's needed for other parameters
|
| 25 |
+
solver = str(solver).lower()
|
| 26 |
+
solvers = {"arpack", "lobpcg", "propack"}
|
| 27 |
+
if solver not in solvers:
|
| 28 |
+
raise ValueError(f"solver must be one of {solvers}.")
|
| 29 |
+
|
| 30 |
+
# input validation/standardization for `A`
|
| 31 |
+
A = aslinearoperator(A) # this takes care of some input validation
|
| 32 |
+
if not (np.issubdtype(A.dtype, np.complexfloating)
|
| 33 |
+
or np.issubdtype(A.dtype, np.floating)):
|
| 34 |
+
message = "`A` must be of floating or complex floating data type."
|
| 35 |
+
raise ValueError(message)
|
| 36 |
+
if np.prod(A.shape) == 0:
|
| 37 |
+
message = "`A` must not be empty."
|
| 38 |
+
raise ValueError(message)
|
| 39 |
+
|
| 40 |
+
# input validation/standardization for `k`
|
| 41 |
+
kmax = min(A.shape) if solver == 'propack' else min(A.shape) - 1
|
| 42 |
+
if int(k) != k or not (0 < k <= kmax):
|
| 43 |
+
message = "`k` must be an integer satisfying `0 < k < min(A.shape)`."
|
| 44 |
+
raise ValueError(message)
|
| 45 |
+
k = int(k)
|
| 46 |
+
|
| 47 |
+
# input validation/standardization for `ncv`
|
| 48 |
+
if solver == "arpack" and ncv is not None:
|
| 49 |
+
if int(ncv) != ncv or not (k < ncv < min(A.shape)):
|
| 50 |
+
message = ("`ncv` must be an integer satisfying "
|
| 51 |
+
"`k < ncv < min(A.shape)`.")
|
| 52 |
+
raise ValueError(message)
|
| 53 |
+
ncv = int(ncv)
|
| 54 |
+
|
| 55 |
+
# input validation/standardization for `tol`
|
| 56 |
+
if tol < 0 or not np.isfinite(tol):
|
| 57 |
+
message = "`tol` must be a non-negative floating point value."
|
| 58 |
+
raise ValueError(message)
|
| 59 |
+
tol = float(tol)
|
| 60 |
+
|
| 61 |
+
# input validation/standardization for `which`
|
| 62 |
+
which = str(which).upper()
|
| 63 |
+
whichs = {'LM', 'SM'}
|
| 64 |
+
if which not in whichs:
|
| 65 |
+
raise ValueError(f"`which` must be in {whichs}.")
|
| 66 |
+
|
| 67 |
+
# input validation/standardization for `v0`
|
| 68 |
+
if v0 is not None:
|
| 69 |
+
v0 = np.atleast_1d(v0)
|
| 70 |
+
if not (np.issubdtype(v0.dtype, np.complexfloating)
|
| 71 |
+
or np.issubdtype(v0.dtype, np.floating)):
|
| 72 |
+
message = ("`v0` must be of floating or complex floating "
|
| 73 |
+
"data type.")
|
| 74 |
+
raise ValueError(message)
|
| 75 |
+
|
| 76 |
+
shape = (A.shape[0],) if solver == 'propack' else (min(A.shape),)
|
| 77 |
+
if v0.shape != shape:
|
| 78 |
+
message = f"`v0` must have shape {shape}."
|
| 79 |
+
raise ValueError(message)
|
| 80 |
+
|
| 81 |
+
# input validation/standardization for `maxiter`
|
| 82 |
+
if maxiter is not None and (int(maxiter) != maxiter or maxiter <= 0):
|
| 83 |
+
message = "`maxiter` must be a positive integer."
|
| 84 |
+
raise ValueError(message)
|
| 85 |
+
maxiter = int(maxiter) if maxiter is not None else maxiter
|
| 86 |
+
|
| 87 |
+
# input validation/standardization for `return_singular_vectors`
|
| 88 |
+
# not going to be flexible with this; too complicated for little gain
|
| 89 |
+
rs_options = {True, False, "vh", "u"}
|
| 90 |
+
if return_singular not in rs_options:
|
| 91 |
+
raise ValueError(f"`return_singular_vectors` must be in {rs_options}.")
|
| 92 |
+
|
| 93 |
+
random_state = check_random_state(random_state)
|
| 94 |
+
|
| 95 |
+
return (A, k, ncv, tol, which, v0, maxiter,
|
| 96 |
+
return_singular, solver, random_state)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
|
| 100 |
+
maxiter=None, return_singular_vectors=True,
|
| 101 |
+
solver='arpack', random_state=None, options=None):
|
| 102 |
+
"""
|
| 103 |
+
Partial singular value decomposition of a sparse matrix.
|
| 104 |
+
|
| 105 |
+
Compute the largest or smallest `k` singular values and corresponding
|
| 106 |
+
singular vectors of a sparse matrix `A`. The order in which the singular
|
| 107 |
+
values are returned is not guaranteed.
|
| 108 |
+
|
| 109 |
+
In the descriptions below, let ``M, N = A.shape``.
|
| 110 |
+
|
| 111 |
+
Parameters
|
| 112 |
+
----------
|
| 113 |
+
A : ndarray, sparse matrix, or LinearOperator
|
| 114 |
+
Matrix to decompose of a floating point numeric dtype.
|
| 115 |
+
k : int, default: 6
|
| 116 |
+
Number of singular values and singular vectors to compute.
|
| 117 |
+
Must satisfy ``1 <= k <= kmax``, where ``kmax=min(M, N)`` for
|
| 118 |
+
``solver='propack'`` and ``kmax=min(M, N) - 1`` otherwise.
|
| 119 |
+
ncv : int, optional
|
| 120 |
+
When ``solver='arpack'``, this is the number of Lanczos vectors
|
| 121 |
+
generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
|
| 122 |
+
When ``solver='lobpcg'`` or ``solver='propack'``, this parameter is
|
| 123 |
+
ignored.
|
| 124 |
+
tol : float, optional
|
| 125 |
+
Tolerance for singular values. Zero (default) means machine precision.
|
| 126 |
+
which : {'LM', 'SM'}
|
| 127 |
+
Which `k` singular values to find: either the largest magnitude ('LM')
|
| 128 |
+
or smallest magnitude ('SM') singular values.
|
| 129 |
+
v0 : ndarray, optional
|
| 130 |
+
The starting vector for iteration; see method-specific
|
| 131 |
+
documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
|
| 132 |
+
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
|
| 133 |
+
:ref:`'propack' <sparse.linalg.svds-propack>` for details.
|
| 134 |
+
maxiter : int, optional
|
| 135 |
+
Maximum number of iterations; see method-specific
|
| 136 |
+
documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
|
| 137 |
+
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
|
| 138 |
+
:ref:`'propack' <sparse.linalg.svds-propack>` for details.
|
| 139 |
+
return_singular_vectors : {True, False, "u", "vh"}
|
| 140 |
+
Singular values are always computed and returned; this parameter
|
| 141 |
+
controls the computation and return of singular vectors.
|
| 142 |
+
|
| 143 |
+
- ``True``: return singular vectors.
|
| 144 |
+
- ``False``: do not return singular vectors.
|
| 145 |
+
- ``"u"``: if ``M <= N``, compute only the left singular vectors and
|
| 146 |
+
return ``None`` for the right singular vectors. Otherwise, compute
|
| 147 |
+
all singular vectors.
|
| 148 |
+
- ``"vh"``: if ``M > N``, compute only the right singular vectors and
|
| 149 |
+
return ``None`` for the left singular vectors. Otherwise, compute
|
| 150 |
+
all singular vectors.
|
| 151 |
+
|
| 152 |
+
If ``solver='propack'``, the option is respected regardless of the
|
| 153 |
+
matrix shape.
|
| 154 |
+
|
| 155 |
+
solver : {'arpack', 'propack', 'lobpcg'}, optional
|
| 156 |
+
The solver used.
|
| 157 |
+
:ref:`'arpack' <sparse.linalg.svds-arpack>`,
|
| 158 |
+
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, and
|
| 159 |
+
:ref:`'propack' <sparse.linalg.svds-propack>` are supported.
|
| 160 |
+
Default: `'arpack'`.
|
| 161 |
+
random_state : {None, int, `numpy.random.Generator`,
|
| 162 |
+
`numpy.random.RandomState`}, optional
|
| 163 |
+
|
| 164 |
+
Pseudorandom number generator state used to generate resamples.
|
| 165 |
+
|
| 166 |
+
If `random_state` is ``None`` (or `np.random`), the
|
| 167 |
+
`numpy.random.RandomState` singleton is used.
|
| 168 |
+
If `random_state` is an int, a new ``RandomState`` instance is used,
|
| 169 |
+
seeded with `random_state`.
|
| 170 |
+
If `random_state` is already a ``Generator`` or ``RandomState``
|
| 171 |
+
instance then that instance is used.
|
| 172 |
+
options : dict, optional
|
| 173 |
+
A dictionary of solver-specific options. No solver-specific options
|
| 174 |
+
are currently supported; this parameter is reserved for future use.
|
| 175 |
+
|
| 176 |
+
Returns
|
| 177 |
+
-------
|
| 178 |
+
u : ndarray, shape=(M, k)
|
| 179 |
+
Unitary matrix having left singular vectors as columns.
|
| 180 |
+
s : ndarray, shape=(k,)
|
| 181 |
+
The singular values.
|
| 182 |
+
vh : ndarray, shape=(k, N)
|
| 183 |
+
Unitary matrix having right singular vectors as rows.
|
| 184 |
+
|
| 185 |
+
Notes
|
| 186 |
+
-----
|
| 187 |
+
This is a naive implementation using ARPACK or LOBPCG as an eigensolver
|
| 188 |
+
on the matrix ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on
|
| 189 |
+
which one is smaller size, followed by the Rayleigh-Ritz method
|
| 190 |
+
as postprocessing; see
|
| 191 |
+
Using the normal matrix, in Rayleigh-Ritz method, (2022, Nov. 19),
|
| 192 |
+
Wikipedia, https://w.wiki/4zms.
|
| 193 |
+
|
| 194 |
+
Alternatively, the PROPACK solver can be called.
|
| 195 |
+
|
| 196 |
+
Choices of the input matrix `A` numeric dtype may be limited.
|
| 197 |
+
Only ``solver="lobpcg"`` supports all floating point dtypes
|
| 198 |
+
real: 'np.float32', 'np.float64', 'np.longdouble' and
|
| 199 |
+
complex: 'np.complex64', 'np.complex128', 'np.clongdouble'.
|
| 200 |
+
The ``solver="arpack"`` supports only
|
| 201 |
+
'np.float32', 'np.float64', and 'np.complex128'.
|
| 202 |
+
|
| 203 |
+
Examples
|
| 204 |
+
--------
|
| 205 |
+
Construct a matrix `A` from singular values and vectors.
|
| 206 |
+
|
| 207 |
+
>>> import numpy as np
|
| 208 |
+
>>> from scipy import sparse, linalg, stats
|
| 209 |
+
>>> from scipy.sparse.linalg import svds, aslinearoperator, LinearOperator
|
| 210 |
+
|
| 211 |
+
Construct a dense matrix `A` from singular values and vectors.
|
| 212 |
+
|
| 213 |
+
>>> rng = np.random.default_rng(258265244568965474821194062361901728911)
|
| 214 |
+
>>> orthogonal = stats.ortho_group.rvs(10, random_state=rng)
|
| 215 |
+
>>> s = [1e-3, 1, 2, 3, 4] # non-zero singular values
|
| 216 |
+
>>> u = orthogonal[:, :5] # left singular vectors
|
| 217 |
+
>>> vT = orthogonal[:, 5:].T # right singular vectors
|
| 218 |
+
>>> A = u @ np.diag(s) @ vT
|
| 219 |
+
|
| 220 |
+
With only four singular values/vectors, the SVD approximates the original
|
| 221 |
+
matrix.
|
| 222 |
+
|
| 223 |
+
>>> u4, s4, vT4 = svds(A, k=4)
|
| 224 |
+
>>> A4 = u4 @ np.diag(s4) @ vT4
|
| 225 |
+
>>> np.allclose(A4, A, atol=1e-3)
|
| 226 |
+
True
|
| 227 |
+
|
| 228 |
+
With all five non-zero singular values/vectors, we can reproduce
|
| 229 |
+
the original matrix more accurately.
|
| 230 |
+
|
| 231 |
+
>>> u5, s5, vT5 = svds(A, k=5)
|
| 232 |
+
>>> A5 = u5 @ np.diag(s5) @ vT5
|
| 233 |
+
>>> np.allclose(A5, A)
|
| 234 |
+
True
|
| 235 |
+
|
| 236 |
+
The singular values match the expected singular values.
|
| 237 |
+
|
| 238 |
+
>>> np.allclose(s5, s)
|
| 239 |
+
True
|
| 240 |
+
|
| 241 |
+
Since the singular values are not close to each other in this example,
|
| 242 |
+
every singular vector matches as expected up to a difference in sign.
|
| 243 |
+
|
| 244 |
+
>>> (np.allclose(np.abs(u5), np.abs(u)) and
|
| 245 |
+
... np.allclose(np.abs(vT5), np.abs(vT)))
|
| 246 |
+
True
|
| 247 |
+
|
| 248 |
+
The singular vectors are also orthogonal.
|
| 249 |
+
|
| 250 |
+
>>> (np.allclose(u5.T @ u5, np.eye(5)) and
|
| 251 |
+
... np.allclose(vT5 @ vT5.T, np.eye(5)))
|
| 252 |
+
True
|
| 253 |
+
|
| 254 |
+
If there are (nearly) multiple singular values, the corresponding
|
| 255 |
+
individual singular vectors may be unstable, but the whole invariant
|
| 256 |
+
subspace containing all such singular vectors is computed accurately
|
| 257 |
+
as can be measured by angles between subspaces via 'subspace_angles'.
|
| 258 |
+
|
| 259 |
+
>>> rng = np.random.default_rng(178686584221410808734965903901790843963)
|
| 260 |
+
>>> s = [1, 1 + 1e-6] # non-zero singular values
|
| 261 |
+
>>> u, _ = np.linalg.qr(rng.standard_normal((99, 2)))
|
| 262 |
+
>>> v, _ = np.linalg.qr(rng.standard_normal((99, 2)))
|
| 263 |
+
>>> vT = v.T
|
| 264 |
+
>>> A = u @ np.diag(s) @ vT
|
| 265 |
+
>>> A = A.astype(np.float32)
|
| 266 |
+
>>> u2, s2, vT2 = svds(A, k=2, random_state=rng)
|
| 267 |
+
>>> np.allclose(s2, s)
|
| 268 |
+
True
|
| 269 |
+
|
| 270 |
+
The angles between the individual exact and computed singular vectors
|
| 271 |
+
may not be so small. To check use:
|
| 272 |
+
|
| 273 |
+
>>> (linalg.subspace_angles(u2[:, :1], u[:, :1]) +
|
| 274 |
+
... linalg.subspace_angles(u2[:, 1:], u[:, 1:]))
|
| 275 |
+
array([0.06562513]) # may vary
|
| 276 |
+
>>> (linalg.subspace_angles(vT2[:1, :].T, vT[:1, :].T) +
|
| 277 |
+
... linalg.subspace_angles(vT2[1:, :].T, vT[1:, :].T))
|
| 278 |
+
array([0.06562507]) # may vary
|
| 279 |
+
|
| 280 |
+
As opposed to the angles between the 2-dimensional invariant subspaces
|
| 281 |
+
that these vectors span, which are small for rights singular vectors
|
| 282 |
+
|
| 283 |
+
>>> linalg.subspace_angles(u2, u).sum() < 1e-6
|
| 284 |
+
True
|
| 285 |
+
|
| 286 |
+
as well as for left singular vectors.
|
| 287 |
+
|
| 288 |
+
>>> linalg.subspace_angles(vT2.T, vT.T).sum() < 1e-6
|
| 289 |
+
True
|
| 290 |
+
|
| 291 |
+
The next example follows that of 'sklearn.decomposition.TruncatedSVD'.
|
| 292 |
+
|
| 293 |
+
>>> rng = np.random.RandomState(0)
|
| 294 |
+
>>> X_dense = rng.random(size=(100, 100))
|
| 295 |
+
>>> X_dense[:, 2 * np.arange(50)] = 0
|
| 296 |
+
>>> X = sparse.csr_matrix(X_dense)
|
| 297 |
+
>>> _, singular_values, _ = svds(X, k=5, random_state=rng)
|
| 298 |
+
>>> print(singular_values)
|
| 299 |
+
[ 4.3293... 4.4491... 4.5420... 4.5987... 35.2410...]
|
| 300 |
+
|
| 301 |
+
The function can be called without the transpose of the input matrix
|
| 302 |
+
ever explicitly constructed.
|
| 303 |
+
|
| 304 |
+
>>> rng = np.random.default_rng(102524723947864966825913730119128190974)
|
| 305 |
+
>>> G = sparse.rand(8, 9, density=0.5, random_state=rng)
|
| 306 |
+
>>> Glo = aslinearoperator(G)
|
| 307 |
+
>>> _, singular_values_svds, _ = svds(Glo, k=5, random_state=rng)
|
| 308 |
+
>>> _, singular_values_svd, _ = linalg.svd(G.toarray())
|
| 309 |
+
>>> np.allclose(singular_values_svds, singular_values_svd[-4::-1])
|
| 310 |
+
True
|
| 311 |
+
|
| 312 |
+
The most memory efficient scenario is where neither
|
| 313 |
+
the original matrix, nor its transpose, is explicitly constructed.
|
| 314 |
+
Our example computes the smallest singular values and vectors
|
| 315 |
+
of 'LinearOperator' constructed from the numpy function 'np.diff' used
|
| 316 |
+
column-wise to be consistent with 'LinearOperator' operating on columns.
|
| 317 |
+
|
| 318 |
+
>>> diff0 = lambda a: np.diff(a, axis=0)
|
| 319 |
+
|
| 320 |
+
Let us create the matrix from 'diff0' to be used for validation only.
|
| 321 |
+
|
| 322 |
+
>>> n = 5 # The dimension of the space.
|
| 323 |
+
>>> M_from_diff0 = diff0(np.eye(n))
|
| 324 |
+
>>> print(M_from_diff0.astype(int))
|
| 325 |
+
[[-1 1 0 0 0]
|
| 326 |
+
[ 0 -1 1 0 0]
|
| 327 |
+
[ 0 0 -1 1 0]
|
| 328 |
+
[ 0 0 0 -1 1]]
|
| 329 |
+
|
| 330 |
+
The matrix 'M_from_diff0' is bi-diagonal and could be alternatively
|
| 331 |
+
created directly by
|
| 332 |
+
|
| 333 |
+
>>> M = - np.eye(n - 1, n, dtype=int)
|
| 334 |
+
>>> np.fill_diagonal(M[:,1:], 1)
|
| 335 |
+
>>> np.allclose(M, M_from_diff0)
|
| 336 |
+
True
|
| 337 |
+
|
| 338 |
+
Its transpose
|
| 339 |
+
|
| 340 |
+
>>> print(M.T)
|
| 341 |
+
[[-1 0 0 0]
|
| 342 |
+
[ 1 -1 0 0]
|
| 343 |
+
[ 0 1 -1 0]
|
| 344 |
+
[ 0 0 1 -1]
|
| 345 |
+
[ 0 0 0 1]]
|
| 346 |
+
|
| 347 |
+
can be viewed as the incidence matrix; see
|
| 348 |
+
Incidence matrix, (2022, Nov. 19), Wikipedia, https://w.wiki/5YXU,
|
| 349 |
+
of a linear graph with 5 vertices and 4 edges. The 5x5 normal matrix
|
| 350 |
+
``M.T @ M`` thus is
|
| 351 |
+
|
| 352 |
+
>>> print(M.T @ M)
|
| 353 |
+
[[ 1 -1 0 0 0]
|
| 354 |
+
[-1 2 -1 0 0]
|
| 355 |
+
[ 0 -1 2 -1 0]
|
| 356 |
+
[ 0 0 -1 2 -1]
|
| 357 |
+
[ 0 0 0 -1 1]]
|
| 358 |
+
|
| 359 |
+
the graph Laplacian, while the actually used in 'svds' smaller size
|
| 360 |
+
4x4 normal matrix ``M @ M.T``
|
| 361 |
+
|
| 362 |
+
>>> print(M @ M.T)
|
| 363 |
+
[[ 2 -1 0 0]
|
| 364 |
+
[-1 2 -1 0]
|
| 365 |
+
[ 0 -1 2 -1]
|
| 366 |
+
[ 0 0 -1 2]]
|
| 367 |
+
|
| 368 |
+
is the so-called edge-based Laplacian; see
|
| 369 |
+
Symmetric Laplacian via the incidence matrix, in Laplacian matrix,
|
| 370 |
+
(2022, Nov. 19), Wikipedia, https://w.wiki/5YXW.
|
| 371 |
+
|
| 372 |
+
The 'LinearOperator' setup needs the options 'rmatvec' and 'rmatmat'
|
| 373 |
+
of multiplication by the matrix transpose ``M.T``, but we want to be
|
| 374 |
+
matrix-free to save memory, so knowing how ``M.T`` looks like, we
|
| 375 |
+
manually construct the following function to be
|
| 376 |
+
used in ``rmatmat=diff0t``.
|
| 377 |
+
|
| 378 |
+
>>> def diff0t(a):
|
| 379 |
+
... if a.ndim == 1:
|
| 380 |
+
... a = a[:,np.newaxis] # Turn 1D into 2D array
|
| 381 |
+
... d = np.zeros((a.shape[0] + 1, a.shape[1]), dtype=a.dtype)
|
| 382 |
+
... d[0, :] = - a[0, :]
|
| 383 |
+
... d[1:-1, :] = a[0:-1, :] - a[1:, :]
|
| 384 |
+
... d[-1, :] = a[-1, :]
|
| 385 |
+
... return d
|
| 386 |
+
|
| 387 |
+
We check that our function 'diff0t' for the matrix transpose is valid.
|
| 388 |
+
|
| 389 |
+
>>> np.allclose(M.T, diff0t(np.eye(n-1)))
|
| 390 |
+
True
|
| 391 |
+
|
| 392 |
+
Now we setup our matrix-free 'LinearOperator' called 'diff0_func_aslo'
|
| 393 |
+
and for validation the matrix-based 'diff0_matrix_aslo'.
|
| 394 |
+
|
| 395 |
+
>>> def diff0_func_aslo_def(n):
|
| 396 |
+
... return LinearOperator(matvec=diff0,
|
| 397 |
+
... matmat=diff0,
|
| 398 |
+
... rmatvec=diff0t,
|
| 399 |
+
... rmatmat=diff0t,
|
| 400 |
+
... shape=(n - 1, n))
|
| 401 |
+
>>> diff0_func_aslo = diff0_func_aslo_def(n)
|
| 402 |
+
>>> diff0_matrix_aslo = aslinearoperator(M_from_diff0)
|
| 403 |
+
|
| 404 |
+
And validate both the matrix and its transpose in 'LinearOperator'.
|
| 405 |
+
|
| 406 |
+
>>> np.allclose(diff0_func_aslo(np.eye(n)),
|
| 407 |
+
... diff0_matrix_aslo(np.eye(n)))
|
| 408 |
+
True
|
| 409 |
+
>>> np.allclose(diff0_func_aslo.T(np.eye(n-1)),
|
| 410 |
+
... diff0_matrix_aslo.T(np.eye(n-1)))
|
| 411 |
+
True
|
| 412 |
+
|
| 413 |
+
Having the 'LinearOperator' setup validated, we run the solver.
|
| 414 |
+
|
| 415 |
+
>>> n = 100
|
| 416 |
+
>>> diff0_func_aslo = diff0_func_aslo_def(n)
|
| 417 |
+
>>> u, s, vT = svds(diff0_func_aslo, k=3, which='SM')
|
| 418 |
+
|
| 419 |
+
The singular values squared and the singular vectors are known
|
| 420 |
+
explicitly; see
|
| 421 |
+
Pure Dirichlet boundary conditions, in
|
| 422 |
+
Eigenvalues and eigenvectors of the second derivative,
|
| 423 |
+
(2022, Nov. 19), Wikipedia, https://w.wiki/5YX6,
|
| 424 |
+
since 'diff' corresponds to first
|
| 425 |
+
derivative, and its smaller size n-1 x n-1 normal matrix
|
| 426 |
+
``M @ M.T`` represent the discrete second derivative with the Dirichlet
|
| 427 |
+
boundary conditions. We use these analytic expressions for validation.
|
| 428 |
+
|
| 429 |
+
>>> se = 2. * np.sin(np.pi * np.arange(1, 4) / (2. * n))
|
| 430 |
+
>>> ue = np.sqrt(2 / n) * np.sin(np.pi * np.outer(np.arange(1, n),
|
| 431 |
+
... np.arange(1, 4)) / n)
|
| 432 |
+
>>> np.allclose(s, se, atol=1e-3)
|
| 433 |
+
True
|
| 434 |
+
>>> print(np.allclose(np.abs(u), np.abs(ue), atol=1e-6))
|
| 435 |
+
True
|
| 436 |
+
|
| 437 |
+
"""
|
| 438 |
+
args = _iv(A, k, ncv, tol, which, v0, maxiter, return_singular_vectors,
|
| 439 |
+
solver, random_state)
|
| 440 |
+
(A, k, ncv, tol, which, v0, maxiter,
|
| 441 |
+
return_singular_vectors, solver, random_state) = args
|
| 442 |
+
|
| 443 |
+
largest = (which == 'LM')
|
| 444 |
+
n, m = A.shape
|
| 445 |
+
|
| 446 |
+
if n >= m:
|
| 447 |
+
X_dot = A.matvec
|
| 448 |
+
X_matmat = A.matmat
|
| 449 |
+
XH_dot = A.rmatvec
|
| 450 |
+
XH_mat = A.rmatmat
|
| 451 |
+
transpose = False
|
| 452 |
+
else:
|
| 453 |
+
X_dot = A.rmatvec
|
| 454 |
+
X_matmat = A.rmatmat
|
| 455 |
+
XH_dot = A.matvec
|
| 456 |
+
XH_mat = A.matmat
|
| 457 |
+
transpose = True
|
| 458 |
+
|
| 459 |
+
dtype = getattr(A, 'dtype', None)
|
| 460 |
+
if dtype is None:
|
| 461 |
+
dtype = A.dot(np.zeros([m, 1])).dtype
|
| 462 |
+
|
| 463 |
+
def matvec_XH_X(x):
|
| 464 |
+
return XH_dot(X_dot(x))
|
| 465 |
+
|
| 466 |
+
def matmat_XH_X(x):
|
| 467 |
+
return XH_mat(X_matmat(x))
|
| 468 |
+
|
| 469 |
+
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
|
| 470 |
+
matmat=matmat_XH_X,
|
| 471 |
+
shape=(min(A.shape), min(A.shape)))
|
| 472 |
+
|
| 473 |
+
# Get a low rank approximation of the implicitly defined gramian matrix.
|
| 474 |
+
# This is not a stable way to approach the problem.
|
| 475 |
+
if solver == 'lobpcg':
|
| 476 |
+
|
| 477 |
+
if k == 1 and v0 is not None:
|
| 478 |
+
X = np.reshape(v0, (-1, 1))
|
| 479 |
+
else:
|
| 480 |
+
X = random_state.standard_normal(size=(min(A.shape), k))
|
| 481 |
+
|
| 482 |
+
_, eigvec = lobpcg(XH_X, X, tol=tol ** 2, maxiter=maxiter,
|
| 483 |
+
largest=largest)
|
| 484 |
+
|
| 485 |
+
elif solver == 'propack':
|
| 486 |
+
jobu = return_singular_vectors in {True, 'u'}
|
| 487 |
+
jobv = return_singular_vectors in {True, 'vh'}
|
| 488 |
+
irl_mode = (which == 'SM')
|
| 489 |
+
res = _svdp(A, k=k, tol=tol**2, which=which, maxiter=None,
|
| 490 |
+
compute_u=jobu, compute_v=jobv, irl_mode=irl_mode,
|
| 491 |
+
kmax=maxiter, v0=v0, random_state=random_state)
|
| 492 |
+
|
| 493 |
+
u, s, vh, _ = res # but we'll ignore bnd, the last output
|
| 494 |
+
|
| 495 |
+
# PROPACK order appears to be largest first. `svds` output order is not
|
| 496 |
+
# guaranteed, according to documentation, but for ARPACK and LOBPCG
|
| 497 |
+
# they actually are ordered smallest to largest, so reverse for
|
| 498 |
+
# consistency.
|
| 499 |
+
s = s[::-1]
|
| 500 |
+
u = u[:, ::-1]
|
| 501 |
+
vh = vh[::-1]
|
| 502 |
+
|
| 503 |
+
u = u if jobu else None
|
| 504 |
+
vh = vh if jobv else None
|
| 505 |
+
|
| 506 |
+
if return_singular_vectors:
|
| 507 |
+
return u, s, vh
|
| 508 |
+
else:
|
| 509 |
+
return s
|
| 510 |
+
|
| 511 |
+
elif solver == 'arpack' or solver is None:
|
| 512 |
+
if v0 is None:
|
| 513 |
+
v0 = random_state.standard_normal(size=(min(A.shape),))
|
| 514 |
+
_, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
|
| 515 |
+
ncv=ncv, which=which, v0=v0)
|
| 516 |
+
# arpack do not guarantee exactly orthonormal eigenvectors
|
| 517 |
+
# for clustered eigenvalues, especially in complex arithmetic
|
| 518 |
+
eigvec, _ = np.linalg.qr(eigvec)
|
| 519 |
+
|
| 520 |
+
# the eigenvectors eigvec must be orthonomal here; see gh-16712
|
| 521 |
+
Av = X_matmat(eigvec)
|
| 522 |
+
if not return_singular_vectors:
|
| 523 |
+
s = svd(Av, compute_uv=False, overwrite_a=True)
|
| 524 |
+
return s[::-1]
|
| 525 |
+
|
| 526 |
+
# compute the left singular vectors of X and update the right ones
|
| 527 |
+
# accordingly
|
| 528 |
+
u, s, vh = svd(Av, full_matrices=False, overwrite_a=True)
|
| 529 |
+
u = u[:, ::-1]
|
| 530 |
+
s = s[::-1]
|
| 531 |
+
vh = vh[::-1]
|
| 532 |
+
|
| 533 |
+
jobu = return_singular_vectors in {True, 'u'}
|
| 534 |
+
jobv = return_singular_vectors in {True, 'vh'}
|
| 535 |
+
|
| 536 |
+
if transpose:
|
| 537 |
+
u_tmp = eigvec @ _herm(vh) if jobu else None
|
| 538 |
+
vh = _herm(u) if jobv else None
|
| 539 |
+
u = u_tmp
|
| 540 |
+
else:
|
| 541 |
+
if not jobu:
|
| 542 |
+
u = None
|
| 543 |
+
vh = vh @ _herm(eigvec) if jobv else None
|
| 544 |
+
|
| 545 |
+
return u, s, vh
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/_svds_doc.py
ADDED
|
@@ -0,0 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def _svds_arpack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
                     maxiter=None, return_singular_vectors=True,
                     solver='arpack', random_state=None):
    """
    Partial singular value decomposition of a sparse matrix using ARPACK.

    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.

    In the descriptions below, let ``M, N = A.shape``.

    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, optional
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= min(M, N) - 1``.
        Default is 6.
    ncv : int, optional
        The number of Lanczos vectors generated.
        The default is ``min(n, max(2*k + 1, 20))``.
        If specified, must satisfy ``k + 1 < ncv < min(M, N)``; ``ncv > 2*k``
        is recommended.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        The starting vector for iteration:
        an (approximate) left singular vector if ``N > M`` and a right singular
        vector otherwise. Must be of length ``min(M, N)``.
        Default: random
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed;
        default is ``min(M, N) * 10``.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.

        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.

    solver : {'arpack', 'propack', 'lobpcg'}, optional
        This is the solver-specific documentation for ``solver='arpack'``.
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` and
        :ref:`'propack' <sparse.linalg.svds-propack>`
        are also supported.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate resamples.

        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.

    Notes
    -----
    This is a naive implementation using ARPACK as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.

    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.

    >>> import numpy as np
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT

    With only three singular values/vectors, the SVD approximates the original
    matrix.

    >>> u2, s2, vT2 = svds(A, k=3, solver='arpack')
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True

    With all five singular values/vectors, we can reproduce the original
    matrix.

    >>> u3, s3, vT3 = svds(A, k=5, solver='arpack')
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True

    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.

    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True

    The singular vectors are also orthogonal.

    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True
    """
    # Documentation-only stub: the real dispatch happens in `svds`; this
    # function exists so the solver-specific docstring can be rendered.
    pass
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _svds_lobpcg_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
                     maxiter=None, return_singular_vectors=True,
                     solver='lobpcg', random_state=None):
    """
    Partial singular value decomposition of a sparse matrix using LOBPCG.

    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.

    In the descriptions below, let ``M, N = A.shape``.

    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= min(M, N) - 1``.
    ncv : int, optional
        Ignored.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        If `k` is 1, the starting vector for iteration:
        an (approximate) left singular vector if ``N > M`` and a right singular
        vector otherwise. Must be of length ``min(M, N)``.
        Ignored otherwise.
        Default: random
    maxiter : int, default: 20
        Maximum number of iterations.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.

        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.

    solver : {'arpack', 'propack', 'lobpcg'}, optional
        This is the solver-specific documentation for ``solver='lobpcg'``.
        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
        :ref:`'propack' <sparse.linalg.svds-propack>`
        are also supported.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate resamples.

        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.

    Notes
    -----
    This is a naive implementation using LOBPCG as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.

    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.

    >>> import numpy as np
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT

    With only three singular values/vectors, the SVD approximates the original
    matrix.

    >>> u2, s2, vT2 = svds(A, k=3, solver='lobpcg')
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True

    With all five singular values/vectors, we can reproduce the original
    matrix.

    >>> u3, s3, vT3 = svds(A, k=5, solver='lobpcg')
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True

    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.

    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True

    The singular vectors are also orthogonal.

    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True

    """
    # Documentation-only stub: the real dispatch happens in `svds`; this
    # function exists so the solver-specific docstring can be rendered.
    pass
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _svds_propack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
                      maxiter=None, return_singular_vectors=True,
                      solver='propack', random_state=None):
    """
    Partial singular value decomposition of a sparse matrix using PROPACK.

    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.

    In the descriptions below, let ``M, N = A.shape``.

    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose. If `A` is a ``LinearOperator``
        object, it must define both ``matvec`` and ``rmatvec`` methods.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= min(M, N)``.
    ncv : int, optional
        Ignored.
    tol : float, optional
        The desired relative accuracy for computed singular values.
        Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values. Note that choosing
        ``which='SM'`` will force the ``irl`` option to be set ``True``.
    v0 : ndarray, optional
        Starting vector for iterations: must be of length ``A.shape[0]``.
        If not specified, PROPACK will generate a starting vector.
    maxiter : int, optional
        Maximum number of iterations / maximal dimension of the Krylov
        subspace. Default is ``10 * k``.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.

        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: compute only the left singular vectors; return ``None`` for
          the right singular vectors.
        - ``"vh"``: compute only the right singular vectors; return ``None``
          for the left singular vectors.

    solver : {'arpack', 'propack', 'lobpcg'}, optional
        This is the solver-specific documentation for ``solver='propack'``.
        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`
        are also supported.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate resamples.

        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.

    Notes
    -----
    This is an interface to the Fortran library PROPACK [1]_.
    The current default is to run with IRL mode disabled unless seeking the
    smallest singular values/vectors (``which='SM'``).

    References
    ----------

    .. [1] Larsen, Rasmus Munk. "PROPACK-Software for large and sparse SVD
       calculations." Available online. URL
       http://sun.stanford.edu/~rmunk/PROPACK (2004): 2008-2009.

    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.

    >>> import numpy as np
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT

    With only three singular values/vectors, the SVD approximates the original
    matrix.

    >>> u2, s2, vT2 = svds(A, k=3, solver='propack')
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True

    With all five singular values/vectors, we can reproduce the original
    matrix.

    >>> u3, s3, vT3 = svds(A, k=5, solver='propack')
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True

    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.

    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True

    The singular vectors are also orthogonal.

    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True

    """
    # Documentation-only stub: the real dispatch happens in `svds`; this
    # function exists so the solver-specific docstring can be rendered.
    pass
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/COPYING
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
BSD Software License
|
| 3 |
+
|
| 4 |
+
Pertains to ARPACK and P_ARPACK
|
| 5 |
+
|
| 6 |
+
Copyright (c) 1996-2008 Rice University.
|
| 7 |
+
Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff.
|
| 8 |
+
All rights reserved.
|
| 9 |
+
|
| 10 |
+
Arpack has been renamed to arpack-ng.
|
| 11 |
+
|
| 12 |
+
Copyright (c) 2001-2011 - Scilab Enterprises
|
| 13 |
+
Updated by Allan Cornet, Sylvestre Ledru.
|
| 14 |
+
|
| 15 |
+
Copyright (c) 2010 - Jordi Gutiérrez Hermoso (Octave patch)
|
| 16 |
+
|
| 17 |
+
Copyright (c) 2007 - Sébastien Fabbro (gentoo patch)
|
| 18 |
+
|
| 19 |
+
Redistribution and use in source and binary forms, with or without
|
| 20 |
+
modification, are permitted provided that the following conditions are
|
| 21 |
+
met:
|
| 22 |
+
|
| 23 |
+
- Redistributions of source code must retain the above copyright
|
| 24 |
+
notice, this list of conditions and the following disclaimer.
|
| 25 |
+
|
| 26 |
+
- Redistributions in binary form must reproduce the above copyright
|
| 27 |
+
notice, this list of conditions and the following disclaimer listed
|
| 28 |
+
in this license in the documentation and/or other materials
|
| 29 |
+
provided with the distribution.
|
| 30 |
+
|
| 31 |
+
- Neither the name of the copyright holders nor the names of its
|
| 32 |
+
contributors may be used to endorse or promote products derived from
|
| 33 |
+
this software without specific prior written permission.
|
| 34 |
+
|
| 35 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 36 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 37 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 38 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 39 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 40 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 41 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 42 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 43 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 44 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 45 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Eigenvalue solver using iterative methods.
|
| 3 |
+
|
| 4 |
+
Find k eigenvectors and eigenvalues of a matrix A using the
|
| 5 |
+
Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_.
|
| 6 |
+
|
| 7 |
+
These methods are most useful for large sparse matrices.
|
| 8 |
+
|
| 9 |
+
- eigs(A,k)
|
| 10 |
+
- eigsh(A,k)
|
| 11 |
+
|
| 12 |
+
References
|
| 13 |
+
----------
|
| 14 |
+
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
|
| 15 |
+
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
|
| 16 |
+
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
|
| 17 |
+
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
|
| 18 |
+
|
| 19 |
+
"""
|
| 20 |
+
from .arpack import *
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (781 Bytes). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-39.pyc
ADDED
|
Binary file (44.2 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cp39-win_amd64.dll.a
ADDED
|
Binary file (1.54 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cp39-win_amd64.pyd
ADDED
|
Binary file (800 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/arpack.py
ADDED
|
@@ -0,0 +1,1702 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Find a few eigenvectors and eigenvalues of a matrix.
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
Uses ARPACK: https://github.com/opencollab/arpack-ng
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
# Wrapper implementation notes
|
| 9 |
+
#
|
| 10 |
+
# ARPACK Entry Points
|
| 11 |
+
# -------------------
|
| 12 |
+
# The entry points to ARPACK are
|
| 13 |
+
# - (s,d)seupd : single and double precision symmetric matrix
|
| 14 |
+
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
|
| 15 |
+
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
|
| 16 |
+
# and the *seupd (symmetric matrix) in eigsh().
|
| 17 |
+
# There is no specialized interface for complex Hermitian matrices.
|
| 18 |
+
# To find eigenvalues of a complex Hermitian matrix you
|
| 19 |
+
# may use eigsh(), but eigsh() will simply call eigs()
|
| 20 |
+
# and return the real part of the eigenvalues thus obtained.
|
| 21 |
+
|
| 22 |
+
# Number of eigenvalues returned and complex eigenvalues
|
| 23 |
+
# ------------------------------------------------------
|
| 24 |
+
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
|
| 25 |
+
# eigenvalues and eigenvectors in real (float,double) arrays.
|
| 26 |
+
# Since the eigenvalues and eigenvectors are, in general, complex
|
| 27 |
+
# ARPACK puts the real and imaginary parts in consecutive entries
|
| 28 |
+
# in real-valued arrays. This wrapper puts the real entries
|
| 29 |
+
# into complex data types and attempts to return the requested eigenvalues
|
| 30 |
+
# and eigenvectors.
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# Solver modes
|
| 34 |
+
# ------------
|
| 35 |
+
# ARPACK can handle shifted and shift-inverse computations
|
| 36 |
+
# for eigenvalues by providing a shift (sigma) and a solver.
|
| 37 |
+
|
| 38 |
+
import numpy as np
|
| 39 |
+
import warnings
|
| 40 |
+
from scipy.sparse.linalg._interface import aslinearoperator, LinearOperator
|
| 41 |
+
from scipy.sparse import eye, issparse
|
| 42 |
+
from scipy.linalg import eig, eigh, lu_factor, lu_solve
|
| 43 |
+
from scipy.sparse._sputils import isdense, is_pydata_spmatrix
|
| 44 |
+
from scipy.sparse.linalg import gmres, splu
|
| 45 |
+
from scipy._lib._util import _aligned_zeros
|
| 46 |
+
from scipy._lib._threadsafety import ReentrancyLock
|
| 47 |
+
|
| 48 |
+
from . import _arpack
|
| 49 |
+
arpack_int = _arpack.timing.nbx.dtype
|
| 50 |
+
|
| 51 |
+
__docformat__ = "restructuredtext en"
|
| 52 |
+
|
| 53 |
+
__all__ = ['eigs', 'eigsh', 'ArpackError', 'ArpackNoConvergence']
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
|
| 57 |
+
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
|
| 58 |
+
|
| 59 |
+
DNAUPD_ERRORS = {
|
| 60 |
+
0: "Normal exit.",
|
| 61 |
+
1: "Maximum number of iterations taken. "
|
| 62 |
+
"All possible eigenvalues of OP has been found. IPARAM(5) "
|
| 63 |
+
"returns the number of wanted converged Ritz values.",
|
| 64 |
+
2: "No longer an informational error. Deprecated starting "
|
| 65 |
+
"with release 2 of ARPACK.",
|
| 66 |
+
3: "No shifts could be applied during a cycle of the "
|
| 67 |
+
"Implicitly restarted Arnoldi iteration. One possibility "
|
| 68 |
+
"is to increase the size of NCV relative to NEV. ",
|
| 69 |
+
-1: "N must be positive.",
|
| 70 |
+
-2: "NEV must be positive.",
|
| 71 |
+
-3: "NCV-NEV >= 2 and less than or equal to N.",
|
| 72 |
+
-4: "The maximum number of Arnoldi update iterations allowed "
|
| 73 |
+
"must be greater than zero.",
|
| 74 |
+
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
|
| 75 |
+
-6: "BMAT must be one of 'I' or 'G'.",
|
| 76 |
+
-7: "Length of private work array WORKL is not sufficient.",
|
| 77 |
+
-8: "Error return from LAPACK eigenvalue calculation;",
|
| 78 |
+
-9: "Starting vector is zero.",
|
| 79 |
+
-10: "IPARAM(7) must be 1,2,3,4.",
|
| 80 |
+
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
|
| 81 |
+
-12: "IPARAM(1) must be equal to 0 or 1.",
|
| 82 |
+
-13: "NEV and WHICH = 'BE' are incompatible.",
|
| 83 |
+
-9999: "Could not build an Arnoldi factorization. "
|
| 84 |
+
"IPARAM(5) returns the size of the current Arnoldi "
|
| 85 |
+
"factorization. The user is advised to check that "
|
| 86 |
+
"enough workspace and array storage has been allocated."
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
SNAUPD_ERRORS = DNAUPD_ERRORS
|
| 90 |
+
|
| 91 |
+
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
|
| 92 |
+
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
|
| 93 |
+
|
| 94 |
+
CNAUPD_ERRORS = ZNAUPD_ERRORS
|
| 95 |
+
|
| 96 |
+
DSAUPD_ERRORS = {
|
| 97 |
+
0: "Normal exit.",
|
| 98 |
+
1: "Maximum number of iterations taken. "
|
| 99 |
+
"All possible eigenvalues of OP has been found.",
|
| 100 |
+
2: "No longer an informational error. Deprecated starting with "
|
| 101 |
+
"release 2 of ARPACK.",
|
| 102 |
+
3: "No shifts could be applied during a cycle of the Implicitly "
|
| 103 |
+
"restarted Arnoldi iteration. One possibility is to increase "
|
| 104 |
+
"the size of NCV relative to NEV. ",
|
| 105 |
+
-1: "N must be positive.",
|
| 106 |
+
-2: "NEV must be positive.",
|
| 107 |
+
-3: "NCV must be greater than NEV and less than or equal to N.",
|
| 108 |
+
-4: "The maximum number of Arnoldi update iterations allowed "
|
| 109 |
+
"must be greater than zero.",
|
| 110 |
+
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
|
| 111 |
+
-6: "BMAT must be one of 'I' or 'G'.",
|
| 112 |
+
-7: "Length of private work array WORKL is not sufficient.",
|
| 113 |
+
-8: "Error return from trid. eigenvalue calculation; "
|
| 114 |
+
"Informational error from LAPACK routine dsteqr .",
|
| 115 |
+
-9: "Starting vector is zero.",
|
| 116 |
+
-10: "IPARAM(7) must be 1,2,3,4,5.",
|
| 117 |
+
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
|
| 118 |
+
-12: "IPARAM(1) must be equal to 0 or 1.",
|
| 119 |
+
-13: "NEV and WHICH = 'BE' are incompatible. ",
|
| 120 |
+
-9999: "Could not build an Arnoldi factorization. "
|
| 121 |
+
"IPARAM(5) returns the size of the current Arnoldi "
|
| 122 |
+
"factorization. The user is advised to check that "
|
| 123 |
+
"enough workspace and array storage has been allocated.",
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
SSAUPD_ERRORS = DSAUPD_ERRORS
|
| 127 |
+
|
| 128 |
+
DNEUPD_ERRORS = {
|
| 129 |
+
0: "Normal exit.",
|
| 130 |
+
1: "The Schur form computed by LAPACK routine dlahqr "
|
| 131 |
+
"could not be reordered by LAPACK routine dtrsen. "
|
| 132 |
+
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
|
| 133 |
+
"increase the size of the arrays DR and DI to have "
|
| 134 |
+
"dimension at least dimension NCV and allocate at least NCV "
|
| 135 |
+
"columns for Z. NOTE: Not necessary if Z and V share "
|
| 136 |
+
"the same space. Please notify the authors if this error"
|
| 137 |
+
"occurs.",
|
| 138 |
+
-1: "N must be positive.",
|
| 139 |
+
-2: "NEV must be positive.",
|
| 140 |
+
-3: "NCV-NEV >= 2 and less than or equal to N.",
|
| 141 |
+
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
|
| 142 |
+
-6: "BMAT must be one of 'I' or 'G'.",
|
| 143 |
+
-7: "Length of private work WORKL array is not sufficient.",
|
| 144 |
+
-8: "Error return from calculation of a real Schur form. "
|
| 145 |
+
"Informational error from LAPACK routine dlahqr .",
|
| 146 |
+
-9: "Error return from calculation of eigenvectors. "
|
| 147 |
+
"Informational error from LAPACK routine dtrevc.",
|
| 148 |
+
-10: "IPARAM(7) must be 1,2,3,4.",
|
| 149 |
+
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
|
| 150 |
+
-12: "HOWMNY = 'S' not yet implemented",
|
| 151 |
+
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
|
| 152 |
+
-14: "DNAUPD did not find any eigenvalues to sufficient "
|
| 153 |
+
"accuracy.",
|
| 154 |
+
-15: "DNEUPD got a different count of the number of converged "
|
| 155 |
+
"Ritz values than DNAUPD got. This indicates the user "
|
| 156 |
+
"probably made an error in passing data from DNAUPD to "
|
| 157 |
+
"DNEUPD or that the data was modified before entering "
|
| 158 |
+
"DNEUPD",
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
|
| 162 |
+
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
|
| 163 |
+
"could not be reordered by LAPACK routine strsen . "
|
| 164 |
+
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
|
| 165 |
+
"increase the size of the arrays DR and DI to have "
|
| 166 |
+
"dimension at least dimension NCV and allocate at least "
|
| 167 |
+
"NCV columns for Z. NOTE: Not necessary if Z and V share "
|
| 168 |
+
"the same space. Please notify the authors if this error "
|
| 169 |
+
"occurs.")
|
| 170 |
+
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
|
| 171 |
+
"accuracy.")
|
| 172 |
+
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
|
| 173 |
+
"converged Ritz values than SNAUPD got. This indicates "
|
| 174 |
+
"the user probably made an error in passing data from "
|
| 175 |
+
"SNAUPD to SNEUPD or that the data was modified before "
|
| 176 |
+
"entering SNEUPD")
|
| 177 |
+
|
| 178 |
+
ZNEUPD_ERRORS = {0: "Normal exit.",
|
| 179 |
+
1: "The Schur form computed by LAPACK routine csheqr "
|
| 180 |
+
"could not be reordered by LAPACK routine ztrsen. "
|
| 181 |
+
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
|
| 182 |
+
"increase the size of the array D to have "
|
| 183 |
+
"dimension at least dimension NCV and allocate at least "
|
| 184 |
+
"NCV columns for Z. NOTE: Not necessary if Z and V share "
|
| 185 |
+
"the same space. Please notify the authors if this error "
|
| 186 |
+
"occurs.",
|
| 187 |
+
-1: "N must be positive.",
|
| 188 |
+
-2: "NEV must be positive.",
|
| 189 |
+
-3: "NCV-NEV >= 1 and less than or equal to N.",
|
| 190 |
+
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
|
| 191 |
+
-6: "BMAT must be one of 'I' or 'G'.",
|
| 192 |
+
-7: "Length of private work WORKL array is not sufficient.",
|
| 193 |
+
-8: "Error return from LAPACK eigenvalue calculation. "
|
| 194 |
+
"This should never happened.",
|
| 195 |
+
-9: "Error return from calculation of eigenvectors. "
|
| 196 |
+
"Informational error from LAPACK routine ztrevc.",
|
| 197 |
+
-10: "IPARAM(7) must be 1,2,3",
|
| 198 |
+
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
|
| 199 |
+
-12: "HOWMNY = 'S' not yet implemented",
|
| 200 |
+
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
|
| 201 |
+
-14: "ZNAUPD did not find any eigenvalues to sufficient "
|
| 202 |
+
"accuracy.",
|
| 203 |
+
-15: "ZNEUPD got a different count of the number of "
|
| 204 |
+
"converged Ritz values than ZNAUPD got. This "
|
| 205 |
+
"indicates the user probably made an error in passing "
|
| 206 |
+
"data from ZNAUPD to ZNEUPD or that the data was "
|
| 207 |
+
"modified before entering ZNEUPD"
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
|
| 211 |
+
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
|
| 212 |
+
"accuracy.")
|
| 213 |
+
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
|
| 214 |
+
"converged Ritz values than CNAUPD got. This indicates "
|
| 215 |
+
"the user probably made an error in passing data from "
|
| 216 |
+
"CNAUPD to CNEUPD or that the data was modified before "
|
| 217 |
+
"entering CNEUPD")
|
| 218 |
+
|
| 219 |
+
DSEUPD_ERRORS = {
|
| 220 |
+
0: "Normal exit.",
|
| 221 |
+
-1: "N must be positive.",
|
| 222 |
+
-2: "NEV must be positive.",
|
| 223 |
+
-3: "NCV must be greater than NEV and less than or equal to N.",
|
| 224 |
+
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
|
| 225 |
+
-6: "BMAT must be one of 'I' or 'G'.",
|
| 226 |
+
-7: "Length of private work WORKL array is not sufficient.",
|
| 227 |
+
-8: ("Error return from trid. eigenvalue calculation; "
|
| 228 |
+
"Information error from LAPACK routine dsteqr."),
|
| 229 |
+
-9: "Starting vector is zero.",
|
| 230 |
+
-10: "IPARAM(7) must be 1,2,3,4,5.",
|
| 231 |
+
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
|
| 232 |
+
-12: "NEV and WHICH = 'BE' are incompatible.",
|
| 233 |
+
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
|
| 234 |
+
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
|
| 235 |
+
-16: "HOWMNY = 'S' not yet implemented",
|
| 236 |
+
-17: ("DSEUPD got a different count of the number of converged "
|
| 237 |
+
"Ritz values than DSAUPD got. This indicates the user "
|
| 238 |
+
"probably made an error in passing data from DSAUPD to "
|
| 239 |
+
"DSEUPD or that the data was modified before entering "
|
| 240 |
+
"DSEUPD.")
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
|
| 244 |
+
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
|
| 245 |
+
"to sufficient accuracy.")
|
| 246 |
+
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
|
| 247 |
+
"converged "
|
| 248 |
+
"Ritz values than SSAUPD got. This indicates the user "
|
| 249 |
+
"probably made an error in passing data from SSAUPD to "
|
| 250 |
+
"SSEUPD or that the data was modified before entering "
|
| 251 |
+
"SSEUPD.")
|
| 252 |
+
|
| 253 |
+
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
|
| 254 |
+
's': SSAUPD_ERRORS}
|
| 255 |
+
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
|
| 256 |
+
's': SNAUPD_ERRORS,
|
| 257 |
+
'z': ZNAUPD_ERRORS,
|
| 258 |
+
'c': CNAUPD_ERRORS}
|
| 259 |
+
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
|
| 260 |
+
's': SSEUPD_ERRORS}
|
| 261 |
+
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
|
| 262 |
+
's': SNEUPD_ERRORS,
|
| 263 |
+
'z': ZNEUPD_ERRORS,
|
| 264 |
+
'c': CNEUPD_ERRORS}
|
| 265 |
+
|
| 266 |
+
# accepted values of parameter WHICH in _SEUPD
|
| 267 |
+
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
|
| 268 |
+
|
| 269 |
+
# accepted values of parameter WHICH in _NAUPD
|
| 270 |
+
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
class ArpackError(RuntimeError):
    """Exception raised when an ARPACK routine returns a nonzero info code.

    The human-readable description is looked up in ``infodict`` (one of the
    ``*AUPD``/``*EUPD`` error tables defined in this module).
    """

    def __init__(self, info, infodict=_NAUPD_ERRORS):
        description = infodict.get(info, "Unknown error")
        message = "ARPACK error %d: %s" % (info, description)
        super().__init__(message)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class ArpackNoConvergence(ArpackError):
    """Raised when the ARPACK iteration did not converge.

    Attributes
    ----------
    eigenvalues : ndarray
        Partial result. Converged eigenvalues.
    eigenvectors : ndarray
        Partial result. Converged eigenvectors.

    """

    def __init__(self, msg, eigenvalues, eigenvectors):
        # Funnel the custom message through the base class by handing it a
        # one-entry infodict keyed on the dummy info code -1.
        super().__init__(-1, {-1: msg})
        self.eigenvalues = eigenvalues
        self.eigenvectors = eigenvectors
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def choose_ncv(k):
    """Return the number of Lanczos vectors to use for ``k`` requested values.

    Applies the common ARPACK heuristic of twice the number of requested
    singular/eigen values plus one, but never fewer than 20 vectors.
    """
    heuristic = 2 * k + 1
    return heuristic if heuristic > 20 else 20
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
class _ArpackParams:
    """Common state shared by the symmetric/unsymmetric ARPACK drivers.

    Holds the problem size, the requested eigenvalue count, the residual and
    Ritz-vector work arrays, and the IPARAM/IDO bookkeeping that the ARPACK
    reverse-communication routines read and update on every call.
    """

    def __init__(self, n, k, tp, mode=1, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # Parameters
        # ----------
        # n : int          -- dimension of the (square) operator
        # k : int          -- number of eigenvalues/eigenvectors requested
        # tp : str         -- one-char LAPACK type code: 'f', 'd', 'F' or 'D'
        # mode : int       -- ARPACK solver mode, stored in IPARAM(7)
        # sigma            -- shift for shift-invert style modes (0 when unused)
        # ncv : int        -- number of Lanczos/Arnoldi vectors (chosen if None)
        # v0 : ndarray     -- optional starting vector
        # maxiter : int    -- maximum number of update iterations (10*n if None)
        # which : str      -- part of the spectrum to target, e.g. 'LM'
        # tol : float      -- relative accuracy; 0 means machine precision
        if k <= 0:
            raise ValueError("k must be positive, k=%d" % k)

        if maxiter is None:
            maxiter = n * 10
        if maxiter <= 0:
            raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)

        if tp not in 'fdFD':
            raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")

        if v0 is not None:
            # ARPACK overwrites its initial resid in place, so make a copy
            # rather than aliasing the caller's array.
            self.resid = np.array(v0, copy=True)
            info = 1
        else:
            # info=0 tells ARPACK to generate a random initial vector itself.
            self.resid = np.zeros(n, tp)
            info = 0

        if sigma is None:
            # sigma is not used in this mode; store a neutral placeholder.
            self.sigma = 0
        else:
            self.sigma = sigma

        if ncv is None:
            ncv = choose_ncv(k)
        # ncv can never exceed the problem dimension.
        ncv = min(ncv, n)

        self.v = np.zeros((n, ncv), tp)  # holds Ritz vectors
        self.iparam = np.zeros(11, arpack_int)

        # set solver mode and parameters
        ishfts = 1            # IPARAM(1): use exact shifts
        self.mode = mode
        self.iparam[0] = ishfts
        self.iparam[2] = maxiter       # IPARAM(3): max iterations
        self.iparam[3] = 1             # IPARAM(4): block size (must be 1)
        self.iparam[6] = mode          # IPARAM(7): solver mode

        self.n = n
        self.tol = tol
        self.k = k
        self.maxiter = maxiter
        self.ncv = ncv
        self.which = which
        self.tp = tp
        self.info = info

        # Reverse-communication state: ido is the request flag ARPACK
        # updates on each *aupd call; converged flips when it signals done.
        self.converged = False
        self.ido = 0

    def _raise_no_convergence(self):
        """Raise ArpackNoConvergence carrying any partially converged results.

        Attempts to extract the eigenpairs that did converge; if even the
        extraction fails, attaches empty arrays and the extraction error
        message instead.
        """
        msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
        k_ok = self.iparam[4]       # IPARAM(5): number of converged Ritz values
        num_iter = self.iparam[2]   # IPARAM(3): actual iterations taken
        try:
            ev, vec = self.extract(True)
        except ArpackError as err:
            # Extraction itself failed: report that too, with empty results.
            msg = f"{msg} [{err}]"
            ev = np.zeros((0,))
            vec = np.zeros((self.n, 0))
            k_ok = 0
        raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
class _SymmetricArpackParams(_ArpackParams):
    """Driver state for the real-symmetric ARPACK routines (*saupd/*seupd).

    Wraps the Fortran reverse-communication loop: ``iterate`` makes one
    ``*saupd`` call and services whatever operator application ARPACK
    requests via the IDO flag; ``extract`` post-processes with ``*seupd``.
    """

    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #
        #   mode = 1:
        #     Solve the standard eigenvalue problem:
        #     A*x = lambda*x :
        #       A - symmetric
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = None [not used]
        #       Minv_matvec = None [not used]
        #
        #   mode = 2:
        #     Solve the general eigenvalue problem:
        #     A*x = lambda*M*x
        #       A - symmetric
        #       M - symmetric positive definite
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = left multiplication by M
        #       Minv_matvec = left multiplication by M^-1
        #
        #   mode = 3:
        #     Solve the general eigenvalue problem in shift-invert mode:
        #     A*x = lambda*M*x
        #       A - symmetric
        #       M - symmetric positive semi-definite
        #     Arguments should be
        #       matvec      = None [not used]
        #       M_matvec    = left multiplication by M
        #                     or None, if M is the identity
        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
        #
        #   mode = 4:
        #     Solve the general eigenvalue problem in Buckling mode:
        #     A*x = lambda*AG*x
        #       A  - symmetric positive semi-definite
        #       AG - symmetric indefinite
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = None [not used]
        #       Minv_matvec = left multiplication by [A-sigma*AG]^-1
        #
        #   mode = 5:
        #     Solve the general eigenvalue problem in Cayley-transformed mode:
        #     A*x = lambda*M*x
        #       A - symmetric
        #       M - symmetric positive semi-definite
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = left multiplication by M
        #                     or None, if M is the identity
        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")

            # OP = A, B = I (standard problem)
            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")

            # OP = M^-1 * A; OPa/OPb kept separately so iterate() can
            # reuse ARPACK's workspace for the intermediate A*x product.
            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode == 3:
            if matvec is not None:
                raise ValueError("matvec must not be specified for mode=3")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=3")

            if M_matvec is None:
                # Shift-invert with M = I: OP = [A-sigma*I]^-1
                self.OP = Minv_matvec
                self.OPa = Minv_matvec
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                # Shift-invert, generalized: OP = [A-sigma*M]^-1 * M
                self.OP = lambda x: Minv_matvec(M_matvec(x))
                self.OPa = Minv_matvec
                self.B = M_matvec
                self.bmat = 'G'
        elif mode == 4:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=4")
            if M_matvec is not None:
                raise ValueError("M_matvec must not be specified for mode=4")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=4")
            # Buckling mode: OP = [A-sigma*AG]^-1 * A, B = A
            self.OPa = Minv_matvec
            self.OP = lambda x: self.OPa(matvec(x))
            self.B = matvec
            self.bmat = 'G'
        elif mode == 5:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=5")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=5")

            self.OPa = Minv_matvec
            self.A_matvec = matvec

            if M_matvec is None:
                # Cayley mode with M = I: OP = [A-sigma*I]^-1 * (A+sigma*I)
                self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                # Cayley mode, generalized: OP = [A-sigma*M]^-1 * (A+sigma*M)
                self.OP = lambda x: Minv_matvec(matvec(x)
                                                + sigma * M_matvec(x))
                self.B = M_matvec
                self.bmat = 'G'
        else:
            raise ValueError("mode=%i not implemented" % mode)

        if which not in _SEUPD_WHICH:
            raise ValueError("which must be one of %s"
                             % ' '.join(_SEUPD_WHICH))
        if k >= n:
            raise ValueError("k must be less than ndim(A), k=%d" % k)

        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)

        if self.ncv > n or self.ncv <= k:
            raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)

        # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
        self.workd = _aligned_zeros(3 * n, self.tp)
        self.workl = _aligned_zeros(self.ncv * (self.ncv + 8), self.tp)

        ltr = _type_conv[self.tp]
        if ltr not in ["s", "d"]:
            # The symmetric drivers only exist in single/double real precision.
            raise ValueError("Input matrix is not real-valued.")

        self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'seupd']

        self.iterate_infodict = _SAUPD_ERRORS[ltr]
        self.extract_infodict = _SEUPD_ERRORS[ltr]

        # IPNTR: 1-based (Fortran) pointers into workd/workl set by *saupd.
        self.ipntr = np.zeros(11, arpack_int)

    def iterate(self):
        """Perform one reverse-communication step of the *saupd iteration.

        Calls the Fortran solver, then services its request (encoded in the
        IDO flag) by applying OP, B, or the mode-specific operators to the
        slices of ``workd`` designated by the (1-based) IPNTR pointers.
        Sets ``self.converged`` and raises on errors / non-convergence when
        the iteration finishes.
        """
        self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info = \
            self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                self.tol, self.resid, self.v, self.iparam,
                                self.ipntr, self.workd, self.workl, self.info)

        # IPNTR entries are 1-based Fortran indices, hence the -1 offsets.
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode == 1:
                self.workd[yslice] = self.OP(self.workd[xslice])
            elif self.mode == 2:
                # Overwrite x with A*x first, then apply M^-1 in place.
                self.workd[xslice] = self.OPb(self.workd[xslice])
                self.workd[yslice] = self.OPa(self.workd[xslice])
            elif self.mode == 5:
                # Cayley mode: B*x is already available at IPNTR(3).
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                Ax = self.A_matvec(self.workd[xslice])
                self.workd[yslice] = self.OPa(Ax + (self.sigma *
                                                    self.workd[Bxslice]))
            else:
                # Modes 3 and 4: ARPACK supplies B*x at IPNTR(3); only the
                # shift-inverted solve remains to be applied.
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            # compute y = B*x
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            # IPARAM(1)=1 requests exact shifts, so this should not occur.
            raise ValueError("ARPACK requested user shifts.  Assure ISHIFT==0")
        else:
            self.converged = True

            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)

    def extract(self, return_eigenvectors):
        """Post-process the converged factorization with *seupd.

        Parameters
        ----------
        return_eigenvectors : bool
            If True, return ``(d, z)`` — eigenvalues and eigenvectors —
            otherwise return only the eigenvalues ``d``.

        Raises
        ------
        ArpackError
            If the Fortran extraction routine reports a nonzero ierr.
        """
        rvec = return_eigenvectors
        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
                                          self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v,
                                          self.iparam[0:7], self.ipntr,
                                          self.workd[0:2 * self.n],
                                          self.workl, ierr)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        # Keep only the Ritz values/vectors that actually converged
        # (IPARAM(5) reports how many).
        k_ok = self.iparam[4]
        d = d[:k_ok]
        z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
class _UnsymmetricArpackParams(_ArpackParams):
    """Parameter and workspace holder for ARPACK's non-symmetric solvers.

    Drives the reverse-communication loop of the *naupd routines
    (`iterate`) and the eigenpair extraction of the *neupd routines
    (`extract`) for real non-symmetric or complex matrices.
    """

    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        """Set up operators, workspaces, and the ARPACK routine handles.

        Parameters
        ----------
        n : int
            Dimension of the (square) problem.
        k : int
            Number of eigenvalues requested; must satisfy ``k < n - 1``.
        tp : str
            Single-character LAPACK type code ('f', 'd', 'F', or 'D').
        matvec, M_matvec, Minv_matvec : callable or None
            Operator callbacks; which ones are required depends on `mode`
            (see the mode table below).
        mode : int
            ARPACK operating mode (1, 2, 3, or 4).
        sigma : scalar or None
            Shift for shift-invert modes.
        ncv, v0, maxiter, which, tol
            Passed through to `_ArpackParams.__init__`.
        """
        # The following modes are supported:
        #  mode = 1:
        #    Solve the standard eigenvalue problem:
        #      A*x = lambda*x
        #    A - square matrix
        #    Arguments should be
        #      matvec      = left multiplication by A
        #      M_matvec    = None [not used]
        #      Minv_matvec = None [not used]
        #
        #  mode = 2:
        #    Solve the generalized eigenvalue problem:
        #      A*x = lambda*M*x
        #    A - square matrix
        #    M - symmetric, positive semi-definite
        #    Arguments should be
        #      matvec      = left multiplication by A
        #      M_matvec    = left multiplication by M
        #      Minv_matvec = left multiplication by M^-1
        #
        #  mode = 3,4:
        #    Solve the general eigenvalue problem in shift-invert mode:
        #      A*x = lambda*M*x
        #    A - square matrix
        #    M - symmetric, positive semi-definite
        #    Arguments should be
        #      matvec      = None [not used]
        #      M_matvec    = left multiplication by M
        #                    or None, if M is the identity
        #      Minv_matvec = left multiplication by [A-sigma*M]^-1
        #                    if A is real and mode==3, use the real part of
        #                    Minv_matvec
        #                    if A is real and mode==4, use the imag part of
        #                    Minv_matvec
        #                    if A is complex and mode==3,
        #                    use real and imag parts of Minv_matvec
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")

            self.OP = matvec
            self.B = lambda x: x  # B is the identity for a standard problem
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")

            # OP = M^-1 * A; the split OPa/OPb pair is kept for the
            # reverse-communication loop in iterate().
            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode in (3, 4):
            if matvec is None:
                raise ValueError("matvec must be specified "
                                 "for mode in (3,4)")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified "
                                 "for mode in (3,4)")

            self.matvec = matvec
            if tp in 'DF':  # complex type
                if mode == 3:
                    self.OPa = Minv_matvec
                else:
                    # mode 4 splits real/imag parts, which only makes
                    # sense for a real-typed problem.
                    raise ValueError("mode=4 invalid for complex A")
            else:  # real type
                if mode == 3:
                    self.OPa = lambda x: np.real(Minv_matvec(x))
                else:
                    self.OPa = lambda x: np.imag(Minv_matvec(x))
            if M_matvec is None:
                self.B = lambda x: x
                self.bmat = 'I'
                self.OP = self.OPa
            else:
                self.B = M_matvec
                self.bmat = 'G'
                self.OP = lambda x: self.OPa(M_matvec(x))
        else:
            raise ValueError("mode=%i not implemented" % mode)

        if which not in _NEUPD_WHICH:
            raise ValueError("Parameter which must be one of %s"
                             % ' '.join(_NEUPD_WHICH))
        if k >= n - 1:
            raise ValueError("k must be less than ndim(A)-1, k=%d" % k)

        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)

        if self.ncv > n or self.ncv <= k + 1:
            raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)

        # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
        self.workd = _aligned_zeros(3 * n, self.tp)
        self.workl = _aligned_zeros(3 * self.ncv * (self.ncv + 2), self.tp)

        # Resolve the type-specific Fortran routines (e.g. 'dnaupd').
        ltr = _type_conv[self.tp]
        self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'neupd']

        self.iterate_infodict = _NAUPD_ERRORS[ltr]
        self.extract_infodict = _NEUPD_ERRORS[ltr]

        self.ipntr = np.zeros(14, arpack_int)

        if self.tp in 'FD':
            # Complex routines need a real-valued work array; use
            # _aligned_zeros to work around a f2py bug in Numpy 1.9.1
            self.rwork = _aligned_zeros(self.ncv, self.tp.lower())
        else:
            self.rwork = None

    def iterate(self):
        """Perform one reverse-communication step of *naupd.

        Calls the ARPACK solver, then services the operator request
        encoded in ``self.ido`` (apply OP, apply B, or finish).

        Raises
        ------
        ValueError
            If ARPACK requests user-supplied shifts (ido == 3).
        ArpackError
            On any nonzero solver error code other than
            "no convergence".
        """
        if self.tp in 'fd':
            # Real single/double precision: no rwork argument.
            results = self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v, self.iparam,
                                          self.ipntr, self.workd, self.workl, self.info)
            self.ido, self.tol, self.resid, self.v, \
                self.iparam, self.ipntr, self.info = results

        else:
            results = self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v, self.iparam,
                                          self.ipntr, self.workd, self.workl,
                                          self.rwork, self.info)
            self.ido, self.tol, self.resid, self.v, \
                self.iparam, self.ipntr, self.info = results

        # ipntr holds 1-based Fortran offsets into workd for the input
        # vector x and the output vector y of the requested operation.
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode in (1, 2):
                self.workd[yslice] = self.OP(self.workd[xslice])
            else:
                # Shift-invert: ARPACK already supplies B*x at ipntr[2].
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            self.converged = True

        if self.info == 0:
            pass
        elif self.info == 1:
            self._raise_no_convergence()
        else:
            raise ArpackError(self.info, infodict=self.iterate_infodict)

    def extract(self, return_eigenvectors):
        """Recover eigenvalues/eigenvectors from the Arnoldi factorization.

        For real types the *neupd routines return eigenpairs in packed
        real storage (conjugate pairs in consecutive columns); this method
        unpacks them into complex arrays. For complex types the Fortran
        output is used directly.

        Parameters
        ----------
        return_eigenvectors : bool
            If True, return ``(d, z)``; otherwise return only ``d``.

        Raises
        ------
        ArpackError
            If the extraction routine reports a nonzero error code.
        """
        k, n = self.k, self.n

        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        sigmar = np.real(self.sigma)
        sigmai = np.imag(self.sigma)
        workev = np.zeros(3 * self.ncv, self.tp)

        if self.tp in 'fd':
            # k+1 slots: a trailing conjugate partner may be returned.
            dr = np.zeros(k + 1, self.tp)
            di = np.zeros(k + 1, self.tp)
            zr = np.zeros((n, k + 1), self.tp)
            dr, di, zr, ierr = \
                self._arpack_extract(return_eigenvectors,
                                     howmny, sselect, sigmar, sigmai, workev,
                                     self.bmat, self.which, k, self.tol, self.resid,
                                     self.v, self.iparam, self.ipntr,
                                     self.workd, self.workl, self.info)
            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)
            nreturned = self.iparam[4]  # number of good eigenvalues returned

            # Build complex eigenvalues from real and imaginary parts
            d = dr + 1.0j * di

            # Arrange the eigenvectors: complex eigenvectors are stored as
            # real,imaginary in consecutive columns
            z = zr.astype(self.tp.upper())

            # The ARPACK nonsymmetric real and double interface (s,d)naupd
            # return eigenvalues and eigenvectors in real (float,double)
            # arrays.

            # Efficiency: this should check that return_eigenvectors == True
            #  before going through this construction.
            if sigmai == 0:
                i = 0
                while i <= k:
                    # check if complex
                    if abs(d[i].imag) != 0:
                        # this is a complex conjugate pair with eigenvalues
                        # in consecutive columns
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1

            else:
                # real matrix, mode 3 or 4, imag(sigma) is nonzero:
                # see remark 3 in <s,d>neupd.f
                # Build complex eigenvalues from real and imaginary parts
                i = 0
                while i <= k:
                    if abs(d[i].imag) == 0:
                        # Rayleigh-quotient recovery of the true eigenvalue
                        # from the shifted spectrum (remark 3 in *neupd.f).
                        d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
                    else:
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            d[i] = ((np.dot(zr[:, i],
                                            self.matvec(zr[:, i]))
                                     + np.dot(zr[:, i + 1],
                                              self.matvec(zr[:, i + 1])))
                                    + 1j * (np.dot(zr[:, i],
                                                   self.matvec(zr[:, i + 1]))
                                            - np.dot(zr[:, i + 1],
                                                     self.matvec(zr[:, i]))))
                            d[i + 1] = d[i].conj()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1

            # Now we have k+1 possible eigenvalues and eigenvectors
            # Return the ones specified by the keyword "which"

            if nreturned <= k:
                # we got less or equal as many eigenvalues we wanted
                d = d[:nreturned]
                z = z[:, :nreturned]
            else:
                # we got one extra eigenvalue (likely a cc pair, but which?)
                # Sort by the criterion ARPACK used (in the shifted
                # spectrum for modes 3/4) and keep the best k.
                if self.mode in (1, 2):
                    rd = d
                elif self.mode in (3, 4):
                    rd = 1 / (d - self.sigma)

                if self.which in ['LR', 'SR']:
                    ind = np.argsort(rd.real)
                elif self.which in ['LI', 'SI']:
                    # for LI,SI ARPACK returns largest,smallest
                    # abs(imaginary) (complex pairs come together)
                    ind = np.argsort(abs(rd.imag))
                else:
                    ind = np.argsort(abs(rd))

                if self.which in ['LR', 'LM', 'LI']:
                    ind = ind[-k:][::-1]
                elif self.which in ['SR', 'SM', 'SI']:
                    ind = ind[:k]

                d = d[ind]
                z = z[:, ind]
        else:
            # complex is so much simpler...
            d, z, ierr =\
                self._arpack_extract(return_eigenvectors,
                                     howmny, sselect, self.sigma, workev,
                                     self.bmat, self.which, k, self.tol, self.resid,
                                     self.v, self.iparam, self.ipntr,
                                     self.workd, self.workl, self.rwork, ierr)

            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)

            k_ok = self.iparam[4]
            d = d[:k_ok]
            z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d
|
| 900 |
+
|
| 901 |
+
|
| 902 |
+
def _aslinearoperator_with_dtype(m):
|
| 903 |
+
m = aslinearoperator(m)
|
| 904 |
+
if not hasattr(m, 'dtype'):
|
| 905 |
+
x = np.zeros(m.shape[1])
|
| 906 |
+
m.dtype = (m * x).dtype
|
| 907 |
+
return m
|
| 908 |
+
|
| 909 |
+
|
| 910 |
+
class SpLuInv(LinearOperator):
    """
    SpLuInv:
       helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
    """

    def __init__(self, M):
        self.M_lu = splu(M)
        self.shape = M.shape
        self.dtype = M.dtype
        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)

    def _matvec(self, x):
        # careful here: splu.solve on a real factorization throws away the
        # imaginary part of x, so a complex right-hand side against a real
        # M must be solved part by part.
        x = np.asarray(x)
        complex_rhs = np.issubdtype(x.dtype, np.complexfloating)
        if not (self.isreal and complex_rhs):
            return self.M_lu.solve(x.astype(self.dtype))
        re_part = self.M_lu.solve(np.real(x).astype(self.dtype))
        im_part = self.M_lu.solve(np.imag(x).astype(self.dtype))
        return re_part + 1j * im_part
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
class LuInv(LinearOperator):
    """
    LuInv:
       helper class to repeatedly solve M*x=b
       using an LU-decomposition of M
    """

    def __init__(self, M):
        # Factor once up front; every _matvec is then a cheap solve.
        self.shape = M.shape
        self.dtype = M.dtype
        self.M_lu = lu_factor(M)

    def _matvec(self, x):
        return lu_solve(self.M_lu, x)
|
| 948 |
+
|
| 949 |
+
|
| 950 |
+
def gmres_loose(A, b, tol):
|
| 951 |
+
"""
|
| 952 |
+
gmres with looser termination condition.
|
| 953 |
+
"""
|
| 954 |
+
b = np.asarray(b)
|
| 955 |
+
min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps
|
| 956 |
+
return gmres(A, b, rtol=max(tol, min_tol), atol=0)
|
| 957 |
+
|
| 958 |
+
|
| 959 |
+
class IterInv(LinearOperator):
    """
    IterInv:
       helper class to repeatedly solve M*x=b
       using an iterative method.
    """

    def __init__(self, M, ifunc=gmres_loose, tol=0):
        self.M = M
        self.shape = M.shape
        if hasattr(M, 'dtype'):
            self.dtype = M.dtype
        else:
            # Probe the operator with a zero vector to discover its dtype.
            probe = np.zeros(M.shape[1])
            self.dtype = (M * probe).dtype

        # when tol=0, ARPACK uses machine tolerance as calculated
        # by LAPACK's _LAMCH function. We should match this
        self.tol = tol if tol > 0 else 2 * np.finfo(self.dtype).eps
        self.ifunc = ifunc

    def _matvec(self, x):
        sol, info = self.ifunc(self.M, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting M: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return sol
|
| 989 |
+
|
| 990 |
+
|
| 991 |
+
class IterOpInv(LinearOperator):
    """
    IterOpInv:
       helper class to repeatedly solve [A-sigma*M]*x = b
       using an iterative method
    """

    def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0):
        self.A = A
        self.M = M
        self.sigma = sigma

        # Build the shifted operator OP = A - sigma*M (M defaults to the
        # identity when None).
        if M is None:
            def shifted(x):
                return A.matvec(x) - sigma * x
        else:
            def shifted(x):
                return A.matvec(x) - sigma * M.matvec(x)

        probe = np.zeros(A.shape[1])
        self.OP = LinearOperator(self.A.shape,
                                 shifted,
                                 dtype=shifted(probe).dtype)
        self.shape = A.shape

        # when tol=0, ARPACK uses machine tolerance as calculated
        # by LAPACK's _LAMCH function. We should match this
        self.tol = tol if tol > 0 else 2 * np.finfo(self.OP.dtype).eps
        self.ifunc = ifunc

    def _matvec(self, x):
        sol, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting [A-sigma*M]: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return sol

    @property
    def dtype(self):
        return self.OP.dtype
|
| 1040 |
+
|
| 1041 |
+
|
| 1042 |
+
def _fast_spmatrix_to_csc(A, hermitian=False):
|
| 1043 |
+
"""Convert sparse matrix to CSC (by transposing, if possible)"""
|
| 1044 |
+
if (A.format == "csr" and hermitian
|
| 1045 |
+
and not np.issubdtype(A.dtype, np.complexfloating)):
|
| 1046 |
+
return A.T
|
| 1047 |
+
elif is_pydata_spmatrix(A):
|
| 1048 |
+
# No need to convert
|
| 1049 |
+
return A
|
| 1050 |
+
else:
|
| 1051 |
+
return A.tocsc()
|
| 1052 |
+
|
| 1053 |
+
|
| 1054 |
+
def get_inv_matvec(M, hermitian=False, tol=0):
    """Return a callable computing ``M^-1 @ x``.

    Dense matrices get a dense LU solve, sparse matrices a sparse LU
    solve, and anything else (e.g. a LinearOperator) an iterative solver.
    """
    if isdense(M):
        return LuInv(M).matvec
    if issparse(M) or is_pydata_spmatrix(M):
        csc = _fast_spmatrix_to_csc(M, hermitian=hermitian)
        return SpLuInv(csc).matvec
    return IterInv(M, tol=tol).matvec
|
| 1062 |
+
|
| 1063 |
+
|
| 1064 |
+
def get_OPinv_matvec(A, M, sigma, hermitian=False, tol=0):
    """Return a callable computing ``[A - sigma*M]^-1 @ x``.

    Parameters
    ----------
    A, M : matrix, sparse matrix, or LinearOperator
        Problem operators; ``M=None`` means the identity.
    sigma : scalar
        Shift. ``sigma == 0`` reduces to a plain inverse of ``A``.
    hermitian : bool, optional
        Hint used when converting sparse operators to CSC.
    tol : float, optional
        Tolerance forwarded to the iterative fallback solvers.

    Returns
    -------
    callable
        A matvec implementing the shifted inverse.
    """
    if sigma == 0:
        # No shift: this is just M-independent inversion of A.
        return get_inv_matvec(A, hermitian=hermitian, tol=tol)

    if M is None:
        # M is the identity matrix
        if isdense(A):
            if (np.issubdtype(A.dtype, np.complexfloating)
                    or np.imag(sigma) == 0):
                A = np.copy(A)
            else:
                # Real A with a complex shift: promote to complex so the
                # diagonal subtraction below does not truncate sigma.
                A = A + 0j
            # Subtract sigma from the diagonal in place (A is a copy).
            A.flat[::A.shape[1] + 1] -= sigma
            return LuInv(A).matvec
        elif issparse(A) or is_pydata_spmatrix(A):
            A = A - sigma * eye(A.shape[0])
            A = _fast_spmatrix_to_csc(A, hermitian=hermitian)
            return SpLuInv(A).matvec
        else:
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             M, sigma, tol=tol).matvec
    else:
        # BUG FIX: the second clause previously tested
        # is_pydata_spmatrix(A) instead of is_pydata_spmatrix(M), so the
        # classification of M ignored pydata-sparse M entirely.
        if ((not isdense(A) and not issparse(A) and not is_pydata_spmatrix(A)) or
                (not isdense(M) and not issparse(M) and not is_pydata_spmatrix(M))):
            # At least one operator is only available as a matvec:
            # fall back to the iterative shifted solver.
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             _aslinearoperator_with_dtype(M),
                             sigma, tol=tol).matvec
        elif isdense(A) or isdense(M):
            return LuInv(A - sigma * M).matvec
        else:
            OP = A - sigma * M
            OP = _fast_spmatrix_to_csc(OP, hermitian=hermitian)
            return SpLuInv(OP).matvec
|
| 1097 |
+
|
| 1098 |
+
|
| 1099 |
+
# ARPACK is not threadsafe or reentrant (SAVE variables), so we need a
# lock and a re-entering check.
# NOTE(review): "eighs" in the message below looks like a typo for
# "eigsh", but it is a runtime string and is left untouched here.
_ARPACK_LOCK = ReentrancyLock("Nested calls to eigs/eighs not allowed: "
                              "ARPACK is not re-entrant")
|
| 1103 |
+
|
| 1104 |
+
|
| 1105 |
+
def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None,
         ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
         Minv=None, OPinv=None, OPpart=None):
    """
    Find k eigenvalues and eigenvectors of the square matrix A.

    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem
    for w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : ndarray, sparse matrix or LinearOperator
        An array, sparse matrix, or LinearOperator representing
        the operation ``A @ x``, where A is a real or complex square matrix.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N-1. It is not possible to compute all
        eigenvectors of a matrix.
    M : ndarray, sparse matrix or LinearOperator, optional
        An array, sparse matrix, or LinearOperator representing
        the operation M@x for the generalized eigenvalue problem

            A @ x = w * M @ x.

        M must represent a real symmetric matrix if A is real, and must
        represent a complex Hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
        Additionally:

            If `sigma` is None, M is positive definite

            If sigma is specified, M is positive semi-definite

        If sigma is None, eigs requires an operator to compute the solution
        of the linear equation ``M @ x = b``. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        ``x = Minv @ b = M^-1 @ b``.
    sigma : real or complex, optional
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        ``[A - sigma * M] @ x = b``, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
        For a real matrix A, shift-invert can either be done in imaginary
        mode or real mode, specified by the parameter OPpart ('r' or 'i').
        Note that when sigma is specified, the keyword 'which' (below)
        refers to the shifted eigenvalues ``w'[i]`` where:

            If A is real and OPpart == 'r' (default),
            ``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``.

            If A is real and OPpart == 'i',
            ``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``.

            If A is complex, ``w'[i] = 1/(w[i]-sigma)``.

    v0 : ndarray, optional
        Starting vector for iteration.
        Default: random
    ncv : int, optional
        The number of Lanczos vectors generated
        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
        Default: ``min(n, max(2*k + 1, 20))``
    which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : largest magnitude

            'SM' : smallest magnitude

            'LR' : largest real part

            'SR' : smallest real part

            'LI' : largest imaginary part

            'SI' : smallest imaginary part

        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above). ARPACK is generally better
        at finding large values than small values. If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed
        Default: ``n*10``
    tol : float, optional
        Relative accuracy for eigenvalues (stopping criterion)
        The default value of 0 implies machine precision.
    return_eigenvectors : bool, optional
        Return eigenvectors (True) in addition to eigenvalues
    Minv : ndarray, sparse matrix or LinearOperator, optional
        See notes in M, above.
    OPinv : ndarray, sparse matrix or LinearOperator, optional
        See notes in sigma, above.
    OPpart : {'r' or 'i'}, optional
        See notes in sigma, above

    Returns
    -------
    w : ndarray
        Array of k eigenvalues.
    v : ndarray
        An array of `k` eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigsh : eigenvalues and eigenvectors for symmetric matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.

    Examples
    --------
    Find 6 eigenvectors of the identity matrix:

    >>> import numpy as np
    >>> from scipy.sparse.linalg import eigs
    >>> id = np.eye(13)
    >>> vals, vecs = eigs(id, k=6)
    >>> vals
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> vecs.shape
    (13, 6)

    """
    # --- Input validation ---------------------------------------------
    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix (shape={A.shape})')
    if M is not None:
        if M.shape != A.shape:
            raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}')
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence',
                          stacklevel=2)

    n = A.shape[0]

    if k <= 0:
        raise ValueError("k=%d must be greater than 0." % k)

    if k >= n - 1:
        # ARPACK cannot compute (almost) the full spectrum; fall back to
        # the dense LAPACK-based solver when the operands allow it.
        warnings.warn("k >= N - 1 for N * N square matrix. "
                      "Attempting to use scipy.linalg.eig instead.",
                      RuntimeWarning, stacklevel=2)

        if issparse(A):
            raise TypeError("Cannot use scipy.linalg.eig for sparse A with "
                            "k >= N - 1. Use scipy.linalg.eig(A.toarray()) or"
                            " reduce k.")
        if isinstance(A, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "A with k >= N - 1.")
        if isinstance(M, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "M with k >= N - 1.")

        return eig(A, b=M, right=return_eigenvectors)

    # --- Select the ARPACK mode and build the operator callbacks ------
    if sigma is None:
        matvec = _aslinearoperator_with_dtype(A).matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if OPpart is not None:
            raise ValueError("OPpart should not be specified with "
                             "sigma = None or complex A")

        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        #sigma is not None: shift-invert mode
        if np.issubdtype(A.dtype, np.complexfloating):
            if OPpart is not None:
                # NOTE(review): this message mentions "sigma=None" although
                # sigma is not None on this path; only the "complex A" part
                # applies here.
                raise ValueError("OPpart should not be specified "
                                 "with sigma=None or complex A")
            mode = 3
        elif OPpart is None or OPpart.lower() == 'r':
            mode = 3
        elif OPpart.lower() == 'i':
            if np.imag(sigma) == 0:
                raise ValueError("OPpart cannot be 'i' if sigma is real")
            mode = 4
        else:
            raise ValueError("OPpart must be one of ('r','i')")

        matvec = _aslinearoperator_with_dtype(A).matvec
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")
        if OPinv is None:
            Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                           hermitian=False, tol=tol)
        else:
            OPinv = _aslinearoperator_with_dtype(OPinv)
            Minv_matvec = OPinv.matvec
        if M is None:
            M_matvec = None
        else:
            M_matvec = _aslinearoperator_with_dtype(M).matvec

    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                      M_matvec, Minv_matvec, sigma,
                                      ncv, v0, maxiter, which, tol)

    # ARPACK is not re-entrant; serialize the whole iterate/extract cycle.
    with _ARPACK_LOCK:
        while not params.converged:
            params.iterate()

        return params.extract(return_eigenvectors)
|
| 1358 |
+
|
| 1359 |
+
|
| 1360 |
+
def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None,
|
| 1361 |
+
ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
|
| 1362 |
+
Minv=None, OPinv=None, mode='normal'):
|
| 1363 |
+
"""
|
| 1364 |
+
Find k eigenvalues and eigenvectors of the real symmetric square matrix
|
| 1365 |
+
or complex Hermitian matrix A.
|
| 1366 |
+
|
| 1367 |
+
Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem for
|
| 1368 |
+
w[i] eigenvalues with corresponding eigenvectors x[i].
|
| 1369 |
+
|
| 1370 |
+
If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
|
| 1371 |
+
generalized eigenvalue problem for w[i] eigenvalues
|
| 1372 |
+
with corresponding eigenvectors x[i].
|
| 1373 |
+
|
| 1374 |
+
Note that there is no specialized routine for the case when A is a complex
|
| 1375 |
+
Hermitian matrix. In this case, ``eigsh()`` will call ``eigs()`` and return the
|
| 1376 |
+
real parts of the eigenvalues thus obtained.
|
| 1377 |
+
|
| 1378 |
+
Parameters
|
| 1379 |
+
----------
|
| 1380 |
+
A : ndarray, sparse matrix or LinearOperator
|
| 1381 |
+
A square operator representing the operation ``A @ x``, where ``A`` is
|
| 1382 |
+
real symmetric or complex Hermitian. For buckling mode (see below)
|
| 1383 |
+
``A`` must additionally be positive-definite.
|
| 1384 |
+
k : int, optional
|
| 1385 |
+
The number of eigenvalues and eigenvectors desired.
|
| 1386 |
+
`k` must be smaller than N. It is not possible to compute all
|
| 1387 |
+
eigenvectors of a matrix.
|
| 1388 |
+
|
| 1389 |
+
Returns
|
| 1390 |
+
-------
|
| 1391 |
+
w : array
|
| 1392 |
+
Array of k eigenvalues.
|
| 1393 |
+
v : array
|
| 1394 |
+
An array representing the `k` eigenvectors. The column ``v[:, i]`` is
|
| 1395 |
+
the eigenvector corresponding to the eigenvalue ``w[i]``.
|
| 1396 |
+
|
| 1397 |
+
Other Parameters
|
| 1398 |
+
----------------
|
| 1399 |
+
M : An N x N matrix, array, sparse matrix, or linear operator representing
|
| 1400 |
+
the operation ``M @ x`` for the generalized eigenvalue problem
|
| 1401 |
+
|
| 1402 |
+
A @ x = w * M @ x.
|
| 1403 |
+
|
| 1404 |
+
M must represent a real symmetric matrix if A is real, and must
|
| 1405 |
+
represent a complex Hermitian matrix if A is complex. For best
|
| 1406 |
+
results, the data type of M should be the same as that of A.
|
| 1407 |
+
Additionally:
|
| 1408 |
+
|
| 1409 |
+
If sigma is None, M is symmetric positive definite.
|
| 1410 |
+
|
| 1411 |
+
If sigma is specified, M is symmetric positive semi-definite.
|
| 1412 |
+
|
| 1413 |
+
In buckling mode, M is symmetric indefinite.
|
| 1414 |
+
|
| 1415 |
+
If sigma is None, eigsh requires an operator to compute the solution
|
| 1416 |
+
of the linear equation ``M @ x = b``. This is done internally via a
|
| 1417 |
+
(sparse) LU decomposition for an explicit matrix M, or via an
|
| 1418 |
+
iterative solver for a general linear operator. Alternatively,
|
| 1419 |
+
the user can supply the matrix or operator Minv, which gives
|
| 1420 |
+
``x = Minv @ b = M^-1 @ b``.
|
| 1421 |
+
sigma : real
|
| 1422 |
+
Find eigenvalues near sigma using shift-invert mode. This requires
|
| 1423 |
+
an operator to compute the solution of the linear system
|
| 1424 |
+
``[A - sigma * M] x = b``, where M is the identity matrix if
|
| 1425 |
+
unspecified. This is computed internally via a (sparse) LU
|
| 1426 |
+
decomposition for explicit matrices A & M, or via an iterative
|
| 1427 |
+
solver if either A or M is a general linear operator.
|
| 1428 |
+
Alternatively, the user can supply the matrix or operator OPinv,
|
| 1429 |
+
which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
|
| 1430 |
+
Note that when sigma is specified, the keyword 'which' refers to
|
| 1431 |
+
the shifted eigenvalues ``w'[i]`` where:
|
| 1432 |
+
|
| 1433 |
+
if mode == 'normal', ``w'[i] = 1 / (w[i] - sigma)``.
|
| 1434 |
+
|
| 1435 |
+
if mode == 'cayley', ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``.
|
| 1436 |
+
|
| 1437 |
+
if mode == 'buckling', ``w'[i] = w[i] / (w[i] - sigma)``.
|
| 1438 |
+
|
| 1439 |
+
(see further discussion in 'mode' below)
|
| 1440 |
+
v0 : ndarray, optional
|
| 1441 |
+
Starting vector for iteration.
|
| 1442 |
+
Default: random
|
| 1443 |
+
ncv : int, optional
|
| 1444 |
+
The number of Lanczos vectors generated ncv must be greater than k and
|
| 1445 |
+
smaller than n; it is recommended that ``ncv > 2*k``.
|
| 1446 |
+
Default: ``min(n, max(2*k + 1, 20))``
|
| 1447 |
+
which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
|
| 1448 |
+
If A is a complex Hermitian matrix, 'BE' is invalid.
|
| 1449 |
+
Which `k` eigenvectors and eigenvalues to find:
|
| 1450 |
+
|
| 1451 |
+
'LM' : Largest (in magnitude) eigenvalues.
|
| 1452 |
+
|
| 1453 |
+
'SM' : Smallest (in magnitude) eigenvalues.
|
| 1454 |
+
|
| 1455 |
+
'LA' : Largest (algebraic) eigenvalues.
|
| 1456 |
+
|
| 1457 |
+
'SA' : Smallest (algebraic) eigenvalues.
|
| 1458 |
+
|
| 1459 |
+
'BE' : Half (k/2) from each end of the spectrum.
|
| 1460 |
+
|
| 1461 |
+
When k is odd, return one more (k/2+1) from the high end.
|
| 1462 |
+
When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]``
|
| 1463 |
+
(see discussion in 'sigma', above). ARPACK is generally better
|
| 1464 |
+
at finding large values than small values. If small eigenvalues are
|
| 1465 |
+
desired, consider using shift-invert mode for better performance.
|
| 1466 |
+
maxiter : int, optional
|
| 1467 |
+
Maximum number of Arnoldi update iterations allowed.
|
| 1468 |
+
Default: ``n*10``
|
| 1469 |
+
tol : float
|
| 1470 |
+
Relative accuracy for eigenvalues (stopping criterion).
|
| 1471 |
+
The default value of 0 implies machine precision.
|
| 1472 |
+
Minv : N x N matrix, array, sparse matrix, or LinearOperator
|
| 1473 |
+
See notes in M, above.
|
| 1474 |
+
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
|
| 1475 |
+
See notes in sigma, above.
|
| 1476 |
+
return_eigenvectors : bool
|
| 1477 |
+
Return eigenvectors (True) in addition to eigenvalues.
|
| 1478 |
+
This value determines the order in which eigenvalues are sorted.
|
| 1479 |
+
The sort order is also dependent on the `which` variable.
|
| 1480 |
+
|
| 1481 |
+
For which = 'LM' or 'SA':
|
| 1482 |
+
If `return_eigenvectors` is True, eigenvalues are sorted by
|
| 1483 |
+
algebraic value.
|
| 1484 |
+
|
| 1485 |
+
If `return_eigenvectors` is False, eigenvalues are sorted by
|
| 1486 |
+
absolute value.
|
| 1487 |
+
|
| 1488 |
+
For which = 'BE' or 'LA':
|
| 1489 |
+
eigenvalues are always sorted by algebraic value.
|
| 1490 |
+
|
| 1491 |
+
For which = 'SM':
|
| 1492 |
+
If `return_eigenvectors` is True, eigenvalues are sorted by
|
| 1493 |
+
algebraic value.
|
| 1494 |
+
|
| 1495 |
+
If `return_eigenvectors` is False, eigenvalues are sorted by
|
| 1496 |
+
decreasing absolute value.
|
| 1497 |
+
|
| 1498 |
+
mode : string ['normal' | 'buckling' | 'cayley']
|
| 1499 |
+
Specify strategy to use for shift-invert mode. This argument applies
|
| 1500 |
+
only for real-valued A and sigma != None. For shift-invert mode,
|
| 1501 |
+
ARPACK internally solves the eigenvalue problem
|
| 1502 |
+
``OP @ x'[i] = w'[i] * B @ x'[i]``
|
| 1503 |
+
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
|
| 1504 |
+
into the desired eigenvectors and eigenvalues of the problem
|
| 1505 |
+
``A @ x[i] = w[i] * M @ x[i]``.
|
| 1506 |
+
The modes are as follows:
|
| 1507 |
+
|
| 1508 |
+
'normal' :
|
| 1509 |
+
OP = [A - sigma * M]^-1 @ M,
|
| 1510 |
+
B = M,
|
| 1511 |
+
w'[i] = 1 / (w[i] - sigma)
|
| 1512 |
+
|
| 1513 |
+
'buckling' :
|
| 1514 |
+
OP = [A - sigma * M]^-1 @ A,
|
| 1515 |
+
B = A,
|
| 1516 |
+
w'[i] = w[i] / (w[i] - sigma)
|
| 1517 |
+
|
| 1518 |
+
'cayley' :
|
| 1519 |
+
OP = [A - sigma * M]^-1 @ [A + sigma * M],
|
| 1520 |
+
B = M,
|
| 1521 |
+
w'[i] = (w[i] + sigma) / (w[i] - sigma)
|
| 1522 |
+
|
| 1523 |
+
The choice of mode will affect which eigenvalues are selected by
|
| 1524 |
+
the keyword 'which', and can also impact the stability of
|
| 1525 |
+
convergence (see [2] for a discussion).
|
| 1526 |
+
|
| 1527 |
+
Raises
|
| 1528 |
+
------
|
| 1529 |
+
ArpackNoConvergence
|
| 1530 |
+
When the requested convergence is not obtained.
|
| 1531 |
+
|
| 1532 |
+
The currently converged eigenvalues and eigenvectors can be found
|
| 1533 |
+
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
|
| 1534 |
+
object.
|
| 1535 |
+
|
| 1536 |
+
See Also
|
| 1537 |
+
--------
|
| 1538 |
+
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
|
| 1539 |
+
svds : singular value decomposition for a matrix A
|
| 1540 |
+
|
| 1541 |
+
Notes
|
| 1542 |
+
-----
|
| 1543 |
+
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
|
| 1544 |
+
functions which use the Implicitly Restarted Lanczos Method to
|
| 1545 |
+
find the eigenvalues and eigenvectors [2]_.
|
| 1546 |
+
|
| 1547 |
+
References
|
| 1548 |
+
----------
|
| 1549 |
+
.. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
|
| 1550 |
+
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
|
| 1551 |
+
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
|
| 1552 |
+
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
|
| 1553 |
+
|
| 1554 |
+
Examples
|
| 1555 |
+
--------
|
| 1556 |
+
>>> import numpy as np
|
| 1557 |
+
>>> from scipy.sparse.linalg import eigsh
|
| 1558 |
+
>>> identity = np.eye(13)
|
| 1559 |
+
>>> eigenvalues, eigenvectors = eigsh(identity, k=6)
|
| 1560 |
+
>>> eigenvalues
|
| 1561 |
+
array([1., 1., 1., 1., 1., 1.])
|
| 1562 |
+
>>> eigenvectors.shape
|
| 1563 |
+
(13, 6)
|
| 1564 |
+
|
| 1565 |
+
"""
|
| 1566 |
+
# complex Hermitian matrices should be solved with eigs
|
| 1567 |
+
if np.issubdtype(A.dtype, np.complexfloating):
|
| 1568 |
+
if mode != 'normal':
|
| 1569 |
+
raise ValueError("mode=%s cannot be used with "
|
| 1570 |
+
"complex matrix A" % mode)
|
| 1571 |
+
if which == 'BE':
|
| 1572 |
+
raise ValueError("which='BE' cannot be used with complex matrix A")
|
| 1573 |
+
elif which == 'LA':
|
| 1574 |
+
which = 'LR'
|
| 1575 |
+
elif which == 'SA':
|
| 1576 |
+
which = 'SR'
|
| 1577 |
+
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
|
| 1578 |
+
ncv=ncv, maxiter=maxiter, tol=tol,
|
| 1579 |
+
return_eigenvectors=return_eigenvectors, Minv=Minv,
|
| 1580 |
+
OPinv=OPinv)
|
| 1581 |
+
|
| 1582 |
+
if return_eigenvectors:
|
| 1583 |
+
return ret[0].real, ret[1]
|
| 1584 |
+
else:
|
| 1585 |
+
return ret.real
|
| 1586 |
+
|
| 1587 |
+
if A.shape[0] != A.shape[1]:
|
| 1588 |
+
raise ValueError(f'expected square matrix (shape={A.shape})')
|
| 1589 |
+
if M is not None:
|
| 1590 |
+
if M.shape != A.shape:
|
| 1591 |
+
raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}')
|
| 1592 |
+
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
|
| 1593 |
+
warnings.warn('M does not have the same type precision as A. '
|
| 1594 |
+
'This may adversely affect ARPACK convergence',
|
| 1595 |
+
stacklevel=2)
|
| 1596 |
+
|
| 1597 |
+
n = A.shape[0]
|
| 1598 |
+
|
| 1599 |
+
if k <= 0:
|
| 1600 |
+
raise ValueError("k must be greater than 0.")
|
| 1601 |
+
|
| 1602 |
+
if k >= n:
|
| 1603 |
+
warnings.warn("k >= N for N * N square matrix. "
|
| 1604 |
+
"Attempting to use scipy.linalg.eigh instead.",
|
| 1605 |
+
RuntimeWarning, stacklevel=2)
|
| 1606 |
+
|
| 1607 |
+
if issparse(A):
|
| 1608 |
+
raise TypeError("Cannot use scipy.linalg.eigh for sparse A with "
|
| 1609 |
+
"k >= N. Use scipy.linalg.eigh(A.toarray()) or"
|
| 1610 |
+
" reduce k.")
|
| 1611 |
+
if isinstance(A, LinearOperator):
|
| 1612 |
+
raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
|
| 1613 |
+
"A with k >= N.")
|
| 1614 |
+
if isinstance(M, LinearOperator):
|
| 1615 |
+
raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
|
| 1616 |
+
"M with k >= N.")
|
| 1617 |
+
|
| 1618 |
+
return eigh(A, b=M, eigvals_only=not return_eigenvectors)
|
| 1619 |
+
|
| 1620 |
+
if sigma is None:
|
| 1621 |
+
A = _aslinearoperator_with_dtype(A)
|
| 1622 |
+
matvec = A.matvec
|
| 1623 |
+
|
| 1624 |
+
if OPinv is not None:
|
| 1625 |
+
raise ValueError("OPinv should not be specified "
|
| 1626 |
+
"with sigma = None.")
|
| 1627 |
+
if M is None:
|
| 1628 |
+
#standard eigenvalue problem
|
| 1629 |
+
mode = 1
|
| 1630 |
+
M_matvec = None
|
| 1631 |
+
Minv_matvec = None
|
| 1632 |
+
if Minv is not None:
|
| 1633 |
+
raise ValueError("Minv should not be "
|
| 1634 |
+
"specified with M = None.")
|
| 1635 |
+
else:
|
| 1636 |
+
#general eigenvalue problem
|
| 1637 |
+
mode = 2
|
| 1638 |
+
if Minv is None:
|
| 1639 |
+
Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
|
| 1640 |
+
else:
|
| 1641 |
+
Minv = _aslinearoperator_with_dtype(Minv)
|
| 1642 |
+
Minv_matvec = Minv.matvec
|
| 1643 |
+
M_matvec = _aslinearoperator_with_dtype(M).matvec
|
| 1644 |
+
else:
|
| 1645 |
+
# sigma is not None: shift-invert mode
|
| 1646 |
+
if Minv is not None:
|
| 1647 |
+
raise ValueError("Minv should not be specified when sigma is")
|
| 1648 |
+
|
| 1649 |
+
# normal mode
|
| 1650 |
+
if mode == 'normal':
|
| 1651 |
+
mode = 3
|
| 1652 |
+
matvec = None
|
| 1653 |
+
if OPinv is None:
|
| 1654 |
+
Minv_matvec = get_OPinv_matvec(A, M, sigma,
|
| 1655 |
+
hermitian=True, tol=tol)
|
| 1656 |
+
else:
|
| 1657 |
+
OPinv = _aslinearoperator_with_dtype(OPinv)
|
| 1658 |
+
Minv_matvec = OPinv.matvec
|
| 1659 |
+
if M is None:
|
| 1660 |
+
M_matvec = None
|
| 1661 |
+
else:
|
| 1662 |
+
M = _aslinearoperator_with_dtype(M)
|
| 1663 |
+
M_matvec = M.matvec
|
| 1664 |
+
|
| 1665 |
+
# buckling mode
|
| 1666 |
+
elif mode == 'buckling':
|
| 1667 |
+
mode = 4
|
| 1668 |
+
if OPinv is None:
|
| 1669 |
+
Minv_matvec = get_OPinv_matvec(A, M, sigma,
|
| 1670 |
+
hermitian=True, tol=tol)
|
| 1671 |
+
else:
|
| 1672 |
+
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
|
| 1673 |
+
matvec = _aslinearoperator_with_dtype(A).matvec
|
| 1674 |
+
M_matvec = None
|
| 1675 |
+
|
| 1676 |
+
# cayley-transform mode
|
| 1677 |
+
elif mode == 'cayley':
|
| 1678 |
+
mode = 5
|
| 1679 |
+
matvec = _aslinearoperator_with_dtype(A).matvec
|
| 1680 |
+
if OPinv is None:
|
| 1681 |
+
Minv_matvec = get_OPinv_matvec(A, M, sigma,
|
| 1682 |
+
hermitian=True, tol=tol)
|
| 1683 |
+
else:
|
| 1684 |
+
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
|
| 1685 |
+
if M is None:
|
| 1686 |
+
M_matvec = None
|
| 1687 |
+
else:
|
| 1688 |
+
M_matvec = _aslinearoperator_with_dtype(M).matvec
|
| 1689 |
+
|
| 1690 |
+
# unrecognized mode
|
| 1691 |
+
else:
|
| 1692 |
+
raise ValueError("unrecognized mode '%s'" % mode)
|
| 1693 |
+
|
| 1694 |
+
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
|
| 1695 |
+
M_matvec, Minv_matvec, sigma,
|
| 1696 |
+
ncv, v0, maxiter, which, tol)
|
| 1697 |
+
|
| 1698 |
+
with _ARPACK_LOCK:
|
| 1699 |
+
while not params.converged:
|
| 1700 |
+
params.iterate()
|
| 1701 |
+
|
| 1702 |
+
return params.extract(return_eigenvectors)
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py
ADDED
|
File without changes
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py
ADDED
|
@@ -0,0 +1,718 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__usage__ = """
|
| 2 |
+
To run tests locally:
|
| 3 |
+
python tests/test_arpack.py [-l<int>] [-v<int>]
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import threading
|
| 8 |
+
import itertools
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
|
| 13 |
+
from pytest import raises as assert_raises
|
| 14 |
+
import pytest
|
| 15 |
+
|
| 16 |
+
from numpy import dot, conj, random
|
| 17 |
+
from scipy.linalg import eig, eigh
|
| 18 |
+
from scipy.sparse import csc_matrix, csr_matrix, diags, rand
|
| 19 |
+
from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
| 20 |
+
from scipy.sparse.linalg._eigen.arpack import (eigs, eigsh, arpack,
|
| 21 |
+
ArpackNoConvergence)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# precision for tests
|
| 28 |
+
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _get_test_tolerance(type_char, mattype=None, D_type=None, which=None):
|
| 32 |
+
"""
|
| 33 |
+
Return tolerance values suitable for a given test:
|
| 34 |
+
|
| 35 |
+
Parameters
|
| 36 |
+
----------
|
| 37 |
+
type_char : {'f', 'd', 'F', 'D'}
|
| 38 |
+
Data type in ARPACK eigenvalue problem
|
| 39 |
+
mattype : {csr_matrix, aslinearoperator, asarray}, optional
|
| 40 |
+
Linear operator type
|
| 41 |
+
|
| 42 |
+
Returns
|
| 43 |
+
-------
|
| 44 |
+
tol
|
| 45 |
+
Tolerance to pass to the ARPACK routine
|
| 46 |
+
rtol
|
| 47 |
+
Relative tolerance for outputs
|
| 48 |
+
atol
|
| 49 |
+
Absolute tolerance for outputs
|
| 50 |
+
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
rtol = {'f': 3000 * np.finfo(np.float32).eps,
|
| 54 |
+
'F': 3000 * np.finfo(np.float32).eps,
|
| 55 |
+
'd': 2000 * np.finfo(np.float64).eps,
|
| 56 |
+
'D': 2000 * np.finfo(np.float64).eps}[type_char]
|
| 57 |
+
atol = rtol
|
| 58 |
+
tol = 0
|
| 59 |
+
|
| 60 |
+
if mattype is aslinearoperator and type_char in ('f', 'F'):
|
| 61 |
+
# iterative methods in single precision: worse errors
|
| 62 |
+
# also: bump ARPACK tolerance so that the iterative method converges
|
| 63 |
+
tol = 30 * np.finfo(np.float32).eps
|
| 64 |
+
rtol *= 5
|
| 65 |
+
|
| 66 |
+
if mattype is csr_matrix and type_char in ('f', 'F'):
|
| 67 |
+
# sparse in single precision: worse errors
|
| 68 |
+
rtol *= 5
|
| 69 |
+
|
| 70 |
+
if (
|
| 71 |
+
which in ('LM', 'SM', 'LA')
|
| 72 |
+
and D_type.name == "gen-hermitian-Mc"
|
| 73 |
+
):
|
| 74 |
+
if type_char == 'F':
|
| 75 |
+
# missing case 1, 2, and more, from PR 14798
|
| 76 |
+
rtol *= 5
|
| 77 |
+
|
| 78 |
+
if type_char == 'D':
|
| 79 |
+
# missing more cases, from PR 14798
|
| 80 |
+
rtol *= 10
|
| 81 |
+
atol *= 10
|
| 82 |
+
|
| 83 |
+
return tol, rtol, atol
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def generate_matrix(N, complex_=False, hermitian=False,
|
| 87 |
+
pos_definite=False, sparse=False):
|
| 88 |
+
M = np.random.random((N, N))
|
| 89 |
+
if complex_:
|
| 90 |
+
M = M + 1j * np.random.random((N, N))
|
| 91 |
+
|
| 92 |
+
if hermitian:
|
| 93 |
+
if pos_definite:
|
| 94 |
+
if sparse:
|
| 95 |
+
i = np.arange(N)
|
| 96 |
+
j = np.random.randint(N, size=N-2)
|
| 97 |
+
i, j = np.meshgrid(i, j)
|
| 98 |
+
M[i, j] = 0
|
| 99 |
+
M = np.dot(M.conj(), M.T)
|
| 100 |
+
else:
|
| 101 |
+
M = np.dot(M.conj(), M.T)
|
| 102 |
+
if sparse:
|
| 103 |
+
i = np.random.randint(N, size=N * N // 4)
|
| 104 |
+
j = np.random.randint(N, size=N * N // 4)
|
| 105 |
+
ind = np.nonzero(i == j)
|
| 106 |
+
j[ind] = (j[ind] + 1) % N
|
| 107 |
+
M[i, j] = 0
|
| 108 |
+
M[j, i] = 0
|
| 109 |
+
else:
|
| 110 |
+
if sparse:
|
| 111 |
+
i = np.random.randint(N, size=N * N // 2)
|
| 112 |
+
j = np.random.randint(N, size=N * N // 2)
|
| 113 |
+
M[i, j] = 0
|
| 114 |
+
return M
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
|
| 118 |
+
M = np.random.random((N, N))
|
| 119 |
+
|
| 120 |
+
M = 0.5 * (M + M.T) # Make M symmetric
|
| 121 |
+
|
| 122 |
+
if pos_definite:
|
| 123 |
+
Id = N * np.eye(N)
|
| 124 |
+
if sparse:
|
| 125 |
+
M = csr_matrix(M)
|
| 126 |
+
M += Id
|
| 127 |
+
else:
|
| 128 |
+
if sparse:
|
| 129 |
+
M = csr_matrix(M)
|
| 130 |
+
|
| 131 |
+
return M
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def assert_allclose_cc(actual, desired, **kw):
|
| 135 |
+
"""Almost equal or complex conjugates almost equal"""
|
| 136 |
+
try:
|
| 137 |
+
assert_allclose(actual, desired, **kw)
|
| 138 |
+
except AssertionError:
|
| 139 |
+
assert_allclose(actual, conj(desired), **kw)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def argsort_which(eigenvalues, typ, k, which,
|
| 143 |
+
sigma=None, OPpart=None, mode=None):
|
| 144 |
+
"""Return sorted indices of eigenvalues using the "which" keyword
|
| 145 |
+
from eigs and eigsh"""
|
| 146 |
+
if sigma is None:
|
| 147 |
+
reval = np.round(eigenvalues, decimals=_ndigits[typ])
|
| 148 |
+
else:
|
| 149 |
+
if mode is None or mode == 'normal':
|
| 150 |
+
if OPpart is None:
|
| 151 |
+
reval = 1. / (eigenvalues - sigma)
|
| 152 |
+
elif OPpart == 'r':
|
| 153 |
+
reval = 0.5 * (1. / (eigenvalues - sigma)
|
| 154 |
+
+ 1. / (eigenvalues - np.conj(sigma)))
|
| 155 |
+
elif OPpart == 'i':
|
| 156 |
+
reval = -0.5j * (1. / (eigenvalues - sigma)
|
| 157 |
+
- 1. / (eigenvalues - np.conj(sigma)))
|
| 158 |
+
elif mode == 'cayley':
|
| 159 |
+
reval = (eigenvalues + sigma) / (eigenvalues - sigma)
|
| 160 |
+
elif mode == 'buckling':
|
| 161 |
+
reval = eigenvalues / (eigenvalues - sigma)
|
| 162 |
+
else:
|
| 163 |
+
raise ValueError("mode='%s' not recognized" % mode)
|
| 164 |
+
|
| 165 |
+
reval = np.round(reval, decimals=_ndigits[typ])
|
| 166 |
+
|
| 167 |
+
if which in ['LM', 'SM']:
|
| 168 |
+
ind = np.argsort(abs(reval))
|
| 169 |
+
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
|
| 170 |
+
ind = np.argsort(np.real(reval))
|
| 171 |
+
elif which in ['LI', 'SI']:
|
| 172 |
+
# for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
|
| 173 |
+
if typ.islower():
|
| 174 |
+
ind = np.argsort(abs(np.imag(reval)))
|
| 175 |
+
else:
|
| 176 |
+
ind = np.argsort(np.imag(reval))
|
| 177 |
+
else:
|
| 178 |
+
raise ValueError("which='%s' is unrecognized" % which)
|
| 179 |
+
|
| 180 |
+
if which in ['LM', 'LA', 'LR', 'LI']:
|
| 181 |
+
return ind[-k:]
|
| 182 |
+
elif which in ['SM', 'SA', 'SR', 'SI']:
|
| 183 |
+
return ind[:k]
|
| 184 |
+
elif which == 'BE':
|
| 185 |
+
return np.concatenate((ind[:k//2], ind[k//2-k:]))
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
|
| 189 |
+
mattype=np.asarray, OPpart=None, mode='normal'):
|
| 190 |
+
general = ('bmat' in d)
|
| 191 |
+
|
| 192 |
+
if symmetric:
|
| 193 |
+
eigs_func = eigsh
|
| 194 |
+
else:
|
| 195 |
+
eigs_func = eigs
|
| 196 |
+
|
| 197 |
+
if general:
|
| 198 |
+
err = ("error for {}:general, typ={}, which={}, sigma={}, "
|
| 199 |
+
"mattype={}, OPpart={}, mode={}".format(eigs_func.__name__,
|
| 200 |
+
typ, which, sigma,
|
| 201 |
+
mattype.__name__,
|
| 202 |
+
OPpart, mode))
|
| 203 |
+
else:
|
| 204 |
+
err = ("error for {}:standard, typ={}, which={}, sigma={}, "
|
| 205 |
+
"mattype={}, OPpart={}, mode={}".format(eigs_func.__name__,
|
| 206 |
+
typ, which, sigma,
|
| 207 |
+
mattype.__name__,
|
| 208 |
+
OPpart, mode))
|
| 209 |
+
|
| 210 |
+
a = d['mat'].astype(typ)
|
| 211 |
+
ac = mattype(a)
|
| 212 |
+
|
| 213 |
+
if general:
|
| 214 |
+
b = d['bmat'].astype(typ)
|
| 215 |
+
bc = mattype(b)
|
| 216 |
+
|
| 217 |
+
# get exact eigenvalues
|
| 218 |
+
exact_eval = d['eval'].astype(typ.upper())
|
| 219 |
+
ind = argsort_which(exact_eval, typ, k, which,
|
| 220 |
+
sigma, OPpart, mode)
|
| 221 |
+
exact_eval = exact_eval[ind]
|
| 222 |
+
|
| 223 |
+
# compute arpack eigenvalues
|
| 224 |
+
kwargs = dict(which=which, v0=v0, sigma=sigma)
|
| 225 |
+
if eigs_func is eigsh:
|
| 226 |
+
kwargs['mode'] = mode
|
| 227 |
+
else:
|
| 228 |
+
kwargs['OPpart'] = OPpart
|
| 229 |
+
|
| 230 |
+
# compute suitable tolerances
|
| 231 |
+
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype, d, which)
|
| 232 |
+
# on rare occasions, ARPACK routines return results that are proper
|
| 233 |
+
# eigenvalues and -vectors, but not necessarily the ones requested in
|
| 234 |
+
# the parameter which. This is inherent to the Krylov methods, and
|
| 235 |
+
# should not be treated as a failure. If such a rare situation
|
| 236 |
+
# occurs, the calculation is tried again (but at most a few times).
|
| 237 |
+
ntries = 0
|
| 238 |
+
while ntries < 5:
|
| 239 |
+
# solve
|
| 240 |
+
if general:
|
| 241 |
+
try:
|
| 242 |
+
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
|
| 243 |
+
except ArpackNoConvergence:
|
| 244 |
+
kwargs['maxiter'] = 20*a.shape[0]
|
| 245 |
+
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
|
| 246 |
+
else:
|
| 247 |
+
try:
|
| 248 |
+
eigenvalues, evec = eigs_func(ac, k, **kwargs)
|
| 249 |
+
except ArpackNoConvergence:
|
| 250 |
+
kwargs['maxiter'] = 20*a.shape[0]
|
| 251 |
+
eigenvalues, evec = eigs_func(ac, k, **kwargs)
|
| 252 |
+
|
| 253 |
+
ind = argsort_which(eigenvalues, typ, k, which,
|
| 254 |
+
sigma, OPpart, mode)
|
| 255 |
+
eigenvalues = eigenvalues[ind]
|
| 256 |
+
evec = evec[:, ind]
|
| 257 |
+
|
| 258 |
+
try:
|
| 259 |
+
# check eigenvalues
|
| 260 |
+
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
|
| 261 |
+
err_msg=err)
|
| 262 |
+
check_evecs = True
|
| 263 |
+
except AssertionError:
|
| 264 |
+
check_evecs = False
|
| 265 |
+
ntries += 1
|
| 266 |
+
|
| 267 |
+
if check_evecs:
|
| 268 |
+
# check eigenvectors
|
| 269 |
+
LHS = np.dot(a, evec)
|
| 270 |
+
if general:
|
| 271 |
+
RHS = eigenvalues * np.dot(b, evec)
|
| 272 |
+
else:
|
| 273 |
+
RHS = eigenvalues * evec
|
| 274 |
+
|
| 275 |
+
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
|
| 276 |
+
break
|
| 277 |
+
|
| 278 |
+
# check eigenvalues
|
| 279 |
+
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
class DictWithRepr(dict):
|
| 283 |
+
def __init__(self, name):
|
| 284 |
+
self.name = name
|
| 285 |
+
|
| 286 |
+
def __repr__(self):
|
| 287 |
+
return "<%s>" % self.name
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
class SymmetricParams:
|
| 291 |
+
def __init__(self):
|
| 292 |
+
self.eigs = eigsh
|
| 293 |
+
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
|
| 294 |
+
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
|
| 295 |
+
self.sigmas_modes = {None: ['normal'],
|
| 296 |
+
0.5: ['normal', 'buckling', 'cayley']}
|
| 297 |
+
|
| 298 |
+
# generate matrices
|
| 299 |
+
# these should all be float32 so that the eigenvalues
|
| 300 |
+
# are the same in float32 and float64
|
| 301 |
+
N = 6
|
| 302 |
+
np.random.seed(2300)
|
| 303 |
+
Ar = generate_matrix(N, hermitian=True,
|
| 304 |
+
pos_definite=True).astype('f').astype('d')
|
| 305 |
+
M = generate_matrix(N, hermitian=True,
|
| 306 |
+
pos_definite=True).astype('f').astype('d')
|
| 307 |
+
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
|
| 308 |
+
complex_=True).astype('F').astype('D')
|
| 309 |
+
Mc = generate_matrix(N, hermitian=True, pos_definite=True,
|
| 310 |
+
complex_=True).astype('F').astype('D')
|
| 311 |
+
v0 = np.random.random(N)
|
| 312 |
+
|
| 313 |
+
# standard symmetric problem
|
| 314 |
+
SS = DictWithRepr("std-symmetric")
|
| 315 |
+
SS['mat'] = Ar
|
| 316 |
+
SS['v0'] = v0
|
| 317 |
+
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
|
| 318 |
+
|
| 319 |
+
# general symmetric problem
|
| 320 |
+
GS = DictWithRepr("gen-symmetric")
|
| 321 |
+
GS['mat'] = Ar
|
| 322 |
+
GS['bmat'] = M
|
| 323 |
+
GS['v0'] = v0
|
| 324 |
+
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
|
| 325 |
+
|
| 326 |
+
# standard hermitian problem
|
| 327 |
+
SH = DictWithRepr("std-hermitian")
|
| 328 |
+
SH['mat'] = Ac
|
| 329 |
+
SH['v0'] = v0
|
| 330 |
+
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
|
| 331 |
+
|
| 332 |
+
# general hermitian problem
|
| 333 |
+
GH = DictWithRepr("gen-hermitian")
|
| 334 |
+
GH['mat'] = Ac
|
| 335 |
+
GH['bmat'] = M
|
| 336 |
+
GH['v0'] = v0
|
| 337 |
+
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
|
| 338 |
+
|
| 339 |
+
# general hermitian problem with hermitian M
|
| 340 |
+
GHc = DictWithRepr("gen-hermitian-Mc")
|
| 341 |
+
GHc['mat'] = Ac
|
| 342 |
+
GHc['bmat'] = Mc
|
| 343 |
+
GHc['v0'] = v0
|
| 344 |
+
GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)
|
| 345 |
+
|
| 346 |
+
self.real_test_cases = [SS, GS]
|
| 347 |
+
self.complex_test_cases = [SH, GH, GHc]
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
class NonSymmetricParams:
    """Parameter grids and reference problems for the non-symmetric
    (``eigs``) test sweeps.

    Builds four test problems (real/complex, standard/generalized) with
    reference eigenvalues computed by the dense LAPACK solver ``eig``.
    """

    def __init__(self):
        # Solver under test and the sweep grid.
        self.eigs = eigs
        self.which = ['LM', 'LR', 'LI']  # , 'SM', 'LR', 'SR', 'LI', 'SI']
        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
        self.sigmas_OPparts = {None: [None],
                               0.1: ['r'],
                               0.1 + 0.1j: ['r', 'i']}

        # generate matrices
        # these should all be float32 so that the eigenvalues
        # are the same in float32 and float64
        N = 6
        np.random.seed(2300)
        Ar = generate_matrix(N).astype('f').astype('d')
        M = generate_matrix(N, hermitian=True,
                            pos_definite=True).astype('f').astype('d')
        Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
        v0 = np.random.random(N)

        def make_case(name, mat, bmat=None):
            # Assemble one problem dict; 'eval' holds the dense
            # reference eigenvalues.
            case = DictWithRepr(name)
            case['mat'] = mat
            if bmat is not None:
                case['bmat'] = bmat
            case['v0'] = v0
            if bmat is None:
                case['eval'] = eig(mat, left=False, right=False)
            else:
                case['eval'] = eig(mat, bmat, left=False, right=False)
            return case

        # standard real nonsymmetric problem
        SNR = make_case("std-real-nonsym", Ar)
        # general real nonsymmetric problem
        GNR = make_case("gen-real-nonsym", Ar, M)
        # standard complex nonsymmetric problem
        SNC = make_case("std-cmplx-nonsym", Ac)
        # general complex nonsymmetric problem
        GNC = make_case("gen-cmplx-nonsym", Ac, M)

        self.real_test_cases = [SNR, GNR]
        self.complex_test_cases = [SNC, GNC]
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def test_symmetric_modes():
    """Sweep eigsh over every real symmetric problem, dtype, 'which',
    matrix wrapper, and shift-invert mode combination."""
    params = SymmetricParams()
    k = 2
    for D in params.real_test_cases:
        for typ, which, mattype in itertools.product(
                'fd', params.which, params.mattypes):
            for sigma, modes in params.sigmas_modes.items():
                for mode in modes:
                    eval_evec(True, D, typ, k, which,
                              None, sigma, mattype, None, mode)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def test_hermitian_modes():
    """Sweep eigsh over complex Hermitian problems; 'BE' is skipped
    because it is not valid for complex matrices."""
    params = SymmetricParams()
    k = 2
    for D in params.complex_test_cases:
        for typ, which in itertools.product('FD', params.which):
            if which == 'BE':
                continue  # BE invalid for complex
            for mattype, sigma in itertools.product(
                    params.mattypes, params.sigmas_modes):
                eval_evec(True, D, typ, k, which,
                          None, sigma, mattype)
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
def test_symmetric_starting_vector():
    """Check eigsh with explicit starting vectors of each dtype for
    k = 1..5 on every real symmetric problem."""
    params = SymmetricParams()
    for k in [1, 2, 3, 4, 5]:
        for D, typ in itertools.product(params.real_test_cases, 'fd'):
            v0 = random.rand(len(D['v0'])).astype(typ)
            eval_evec(True, D, typ, k, 'LM', v0)
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def test_symmetric_no_convergence():
    # With maxiter far too small, eigsh must not exit silently: it has
    # to raise ArpackNoConvergence carrying the partially converged
    # eigenpairs.
    np.random.seed(1234)
    m = generate_matrix(30, hermitian=True, pos_definite=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
        raise AssertionError("Spurious no-error exit")
    except ArpackNoConvergence as err:
        # At least one eigenpair should have converged ...
        k = len(err.eigenvalues)
        if k <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        # ... and the reported pairs must satisfy m @ v = w * v.
        w, v = err.eigenvalues, err.eigenvectors
        assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def test_real_nonsymmetric_modes():
    """Sweep eigs over real nonsymmetric problems for every dtype,
    'which', matrix wrapper, shift sigma, and OP part."""
    params = NonSymmetricParams()
    k = 2
    for D in params.real_test_cases:
        for typ, which, mattype in itertools.product(
                'fd', params.which, params.mattypes):
            for sigma, OPparts in params.sigmas_OPparts.items():
                for OPpart in OPparts:
                    eval_evec(False, D, typ, k, which,
                              None, sigma, mattype, OPpart)
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def test_complex_nonsymmetric_modes():
    """Sweep eigs over complex nonsymmetric problems for every dtype,
    'which', matrix wrapper, and shift sigma."""
    params = NonSymmetricParams()
    k = 2
    for D in params.complex_test_cases:
        for typ, which, mattype, sigma in itertools.product(
                'DF', params.which, params.mattypes,
                params.sigmas_OPparts):
            eval_evec(False, D, typ, k, which,
                      None, sigma, mattype)
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def test_standard_nonsymmetric_starting_vector():
    """Check eigs (standard problem) with explicit starting vectors of
    each dtype for k = 1..4."""
    params = NonSymmetricParams()
    for k in [1, 2, 3, 4]:
        for d, typ in itertools.product(params.complex_test_cases, 'FD'):
            n = d['mat'].shape[0]
            v0 = random.rand(n).astype(typ)
            eval_evec(False, d, typ, k, "LM", v0, None)
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def test_general_nonsymmetric_starting_vector():
    """Check eigs (generalized problem) with explicit starting vectors
    of each dtype for k = 1..4."""
    params = NonSymmetricParams()
    for k in [1, 2, 3, 4]:
        for d, typ in itertools.product(params.complex_test_cases, 'FD'):
            n = d['mat'].shape[0]
            v0 = random.rand(n).astype(typ)
            eval_evec(False, d, typ, k, "LM", v0, None)
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
def test_standard_nonsymmetric_no_convergence():
    # With maxiter far too small, eigs must not exit silently: it has to
    # raise ArpackNoConvergence carrying the partially converged pairs.
    np.random.seed(1234)
    m = generate_matrix(30, complex_=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
        raise AssertionError("Spurious no-error exit")
    except ArpackNoConvergence as err:
        # At least one eigenpair should have converged ...
        k = len(err.eigenvalues)
        if k <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        # ... and every reported pair must satisfy m @ vv = ww * vv.
        w, v = err.eigenvalues, err.eigenvectors
        for ww, vv in zip(w, v.T):
            assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
def test_eigen_bad_shapes():
    """eigs must reject a non-square operator with ValueError."""
    rectangular = csc_matrix(np.zeros((2, 3)))  # A is not square.
    assert_raises(ValueError, eigs, rectangular)
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
def test_eigen_bad_kwargs():
    """eigs must reject an unknown 'which' keyword value."""
    A = csc_matrix(np.zeros((8, 8)))
    # Test eigen on wrong keyword argument
    assert_raises(ValueError, eigs, A, which='XX')
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
def test_ticket_1459_arpack_crash():
|
| 536 |
+
for dtype in [np.float32, np.float64]:
|
| 537 |
+
# This test does not seem to catch the issue for float32,
|
| 538 |
+
# but we made the same fix there, just to be sure
|
| 539 |
+
|
| 540 |
+
N = 6
|
| 541 |
+
k = 2
|
| 542 |
+
|
| 543 |
+
np.random.seed(2301)
|
| 544 |
+
A = np.random.random((N, N)).astype(dtype)
|
| 545 |
+
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
|
| 546 |
+
-0.34365925382227402451, 0.46122533684552280420,
|
| 547 |
+
-0.58001341115969040629, -0.78844877570084292984e-01],
|
| 548 |
+
dtype=dtype)
|
| 549 |
+
|
| 550 |
+
# Should not crash:
|
| 551 |
+
evals, evecs = eigs(A, k, v0=v0)
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
    # Check that the linear operators used by the Arpack wrappers are
    # deallocatable by reference counting -- they are big objects, so
    # Python's cyclic GC may not collect them fast enough before
    # running out of memory if eigs/eigsh are called in a tight loop.

    M_d = np.eye(10)            # dense matrix
    M_s = csc_matrix(M_d)       # sparse matrix
    M_o = aslinearoperator(M_d)  # LinearOperator wrapper

    # Each internal operator must be freed as soon as its refcount
    # drops to zero (verified by the assert_deallocated context).
    with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
        pass
    with assert_deallocated(lambda: arpack.LuInv(M_d)):
        pass
    with assert_deallocated(lambda: arpack.IterInv(M_s)):
        pass
    with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
        pass
    with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
        pass
|
| 575 |
+
|
| 576 |
+
def test_parallel_threads():
    # eigs/eigsh must be safely callable from several threads at once
    # (and from the main thread afterwards), all producing the same
    # eigenvalues for the same input and starting vector.
    results = []
    v0 = np.random.rand(50)

    def worker():
        # Tridiagonal 1-D Laplacian test matrix.
        x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        w, v = eigs(x, k=3, v0=v0)
        results.append(w)

        w, v = eigsh(x, k=3, v0=v0)
        results.append(w)

    threads = [threading.Thread(target=worker) for k in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    # Run once more in the main thread; its result is the reference.
    worker()

    for r in results:
        assert_allclose(r, results[-1])
|
| 598 |
+
|
| 599 |
+
|
| 600 |
+
def test_reentering():
    # Just some linear operator that calls eigs recursively
    def A_matvec(x):
        x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        w, v = eigs(x, k=1)
        return v / w[0]
    A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))

    # The Fortran code is not reentrant, so this fails (gracefully, not crashing)
    assert_raises(RuntimeError, eigs, A, k=1)
    assert_raises(RuntimeError, eigsh, A, k=1)
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
def test_regression_arpackng_1315():
    """Check that issue arpack-ng/#1315 is not present.

    Adapted from arpack-ng/TESTS/bug_1315_single.c.  If this fails,
    then the installed ARPACK library is faulty.
    """
    for dtype in [np.float32, np.float64]:
        np.random.seed(1234)

        # Diagonal matrix with known spectrum 1..1000.
        w0 = np.arange(1, 1000 + 1).astype(dtype)
        A = diags([w0], [0], shape=(1000, 1000))

        v0 = np.random.rand(1000).astype(dtype)
        w, v = eigs(A, k=9, ncv=2 * 9 + 1, which="LM", v0=v0)

        # The nine largest diagonal entries must be recovered.
        assert_allclose(np.sort(w), np.sort(w0[-9:]), rtol=1e-4)
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def test_eigs_for_k_greater():
    """When k reaches or exceeds n - 1, eigs falls back to the dense
    solver and must return the full dense `eig` result; unsupported
    input types in that fallback must raise TypeError."""
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = np.random.random((4, 4))
    M_sparse = generate_matrix(4, sparse=True)
    M_linop = aslinearoperator(M_dense)
    eig_tuple1 = eig(A, b=M_dense)
    eig_tuple2 = eig(A, b=M_sparse)

    with suppress_warnings() as sup:
        # The fallback emits a RuntimeWarning about k being too large.
        sup.filter(RuntimeWarning)

        for k in (3, 4, 5):
            assert_equal(eigs(A, M=M_dense, k=k), eig_tuple1)
        assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)

        # M as LinearOperator
        assert_raises(TypeError, eigs, A, M=M_linop, k=3)

        # Test 'A' for different types
        assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
        assert_raises(TypeError, eigs, A_sparse, k=3)
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
def test_eigsh_for_k_greater():
    """When k reaches or exceeds n - 1, eigsh falls back to the dense
    solver and must return the full dense `eigh` result; unsupported
    input types in that fallback must raise TypeError."""
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = generate_matrix_symmetric(4, pos_definite=True)
    M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
    M_linop = aslinearoperator(M_dense)
    eig_tuple1 = eigh(A, b=M_dense)
    eig_tuple2 = eigh(A, b=M_sparse)

    with suppress_warnings() as sup:
        # The fallback emits a RuntimeWarning about k being too large.
        sup.filter(RuntimeWarning)

        for k in (4, 5):
            assert_equal(eigsh(A, M=M_dense, k=k), eig_tuple1)
        assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)

        # M as LinearOperator
        assert_raises(TypeError, eigsh, A, M=M_linop, k=4)

        # Test 'A' for different types
        assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
        assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
def test_real_eigs_real_k_subset():
    # For a real matrix, the eigenvalues returned for k must be a
    # subset of those returned for k+1, across all 'which'/'sigma'
    # combinations and both float dtypes.
    np.random.seed(1)

    n = 10
    A = rand(n, n, density=0.5)
    A.data *= 2
    A.data -= 1  # rescale entries from [0, 1) to [-1, 1)

    v0 = np.ones(n)

    whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
    dtypes = [np.float32, np.float64]

    for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
        prev_w = np.array([], dtype=dtype)
        eps = np.finfo(dtype).eps
        for k in range(1, 9):
            w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
                        v0=v0.astype(dtype), tol=0)
            # Residual check: A @ z == z * w up to roundoff.
            assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))

            # Check that the set of eigenvalues for `k` is a subset of that for `k+1`
            # (trivially true on the first iteration, where prev_w is empty).
            dist = abs(prev_w[:,None] - w).min(axis=1)
            assert_allclose(dist, 0, atol=np.sqrt(eps))

            prev_w = w

            # Check sort order
            if sigma is None:
                d = w
            else:
                # In shift-invert mode the ordering is by 1/(w - sigma).
                d = 1 / (w - sigma)

            if which == 'LM':
                # ARPACK is systematic for 'LM', but sort order
                # appears not well defined for other modes
                assert np.all(np.diff(abs(d)) <= 1e-6)
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)

LOBPCG is a preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.

Call the function lobpcg - see help for lobpcg.lobpcg.

"""
from .lobpcg import *

# Re-export only the public names brought in by the star import above.
__all__ = [s for s in dir() if not s.startswith('_')]

# Attach the per-subpackage test runner, then drop the helper name so it
# does not leak into the public namespace.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (736 Bytes). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-39.pyc
ADDED
|
Binary file (25.4 kB). View file
|
|
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py
ADDED
|
@@ -0,0 +1,1112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
|
| 3 |
+
|
| 4 |
+
References
|
| 5 |
+
----------
|
| 6 |
+
.. [1] A. V. Knyazev (2001),
|
| 7 |
+
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
|
| 8 |
+
Block Preconditioned Conjugate Gradient Method.
|
| 9 |
+
SIAM Journal on Scientific Computing 23, no. 2,
|
| 10 |
+
pp. 517-541. :doi:`10.1137/S1064827500366124`
|
| 11 |
+
|
| 12 |
+
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
|
| 13 |
+
Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
|
| 14 |
+
in hypre and PETSc. :arxiv:`0705.2626`
|
| 15 |
+
|
| 16 |
+
.. [3] A. V. Knyazev's C and MATLAB implementations:
|
| 17 |
+
https://github.com/lobpcg/blopex
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
import warnings
|
| 21 |
+
import numpy as np
|
| 22 |
+
from scipy.linalg import (inv, eigh, cho_factor, cho_solve,
|
| 23 |
+
cholesky, LinAlgError)
|
| 24 |
+
from scipy.sparse.linalg import LinearOperator
|
| 25 |
+
from scipy.sparse import issparse
|
| 26 |
+
|
| 27 |
+
__all__ = ["lobpcg"]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _report_nonhermitian(M, name):
|
| 31 |
+
"""
|
| 32 |
+
Report if `M` is not a Hermitian matrix given its type.
|
| 33 |
+
"""
|
| 34 |
+
from scipy.linalg import norm
|
| 35 |
+
|
| 36 |
+
md = M - M.T.conj()
|
| 37 |
+
nmd = norm(md, 1)
|
| 38 |
+
tol = 10 * np.finfo(M.dtype).eps
|
| 39 |
+
tol = max(tol, tol * norm(M, 1))
|
| 40 |
+
if nmd > tol:
|
| 41 |
+
warnings.warn(
|
| 42 |
+
f"Matrix {name} of the type {M.dtype} is not Hermitian: "
|
| 43 |
+
f"condition: {nmd} < {tol} fails.",
|
| 44 |
+
UserWarning, stacklevel=4
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
def _as2d(ar):
|
| 48 |
+
"""
|
| 49 |
+
If the input array is 2D return it, if it is 1D, append a dimension,
|
| 50 |
+
making it a column vector.
|
| 51 |
+
"""
|
| 52 |
+
if ar.ndim == 2:
|
| 53 |
+
return ar
|
| 54 |
+
else: # Assume 1!
|
| 55 |
+
aux = np.asarray(ar)
|
| 56 |
+
aux.shape = (ar.shape[0], 1)
|
| 57 |
+
return aux
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _makeMatMat(m):
|
| 61 |
+
if m is None:
|
| 62 |
+
return None
|
| 63 |
+
elif callable(m):
|
| 64 |
+
return lambda v: m(v)
|
| 65 |
+
else:
|
| 66 |
+
return lambda v: m @ v
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def _matmul_inplace(x, y, verbosityLevel=0):
|
| 70 |
+
"""Perform 'np.matmul' in-place if possible.
|
| 71 |
+
|
| 72 |
+
If some sufficient conditions for inplace matmul are met, do so.
|
| 73 |
+
Otherwise try inplace update and fall back to overwrite if that fails.
|
| 74 |
+
"""
|
| 75 |
+
if x.flags["CARRAY"] and x.shape[1] == y.shape[1] and x.dtype == y.dtype:
|
| 76 |
+
# conditions where we can guarantee that inplace updates will work;
|
| 77 |
+
# i.e. x is not a view/slice, x & y have compatible dtypes, and the
|
| 78 |
+
# shape of the result of x @ y matches the shape of x.
|
| 79 |
+
np.matmul(x, y, out=x)
|
| 80 |
+
else:
|
| 81 |
+
# ideally, we'd have an exhaustive list of conditions above when
|
| 82 |
+
# inplace updates are possible; since we don't, we opportunistically
|
| 83 |
+
# try if it works, and fall back to overwriting if necessary
|
| 84 |
+
try:
|
| 85 |
+
np.matmul(x, y, out=x)
|
| 86 |
+
except Exception:
|
| 87 |
+
if verbosityLevel:
|
| 88 |
+
warnings.warn(
|
| 89 |
+
"Inplace update of x = x @ y failed, "
|
| 90 |
+
"x needs to be overwritten.",
|
| 91 |
+
UserWarning, stacklevel=3
|
| 92 |
+
)
|
| 93 |
+
x = x @ y
|
| 94 |
+
return x
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
|
| 98 |
+
"""Changes blockVectorV in-place."""
|
| 99 |
+
YBV = blockVectorBY.T.conj() @ blockVectorV
|
| 100 |
+
tmp = cho_solve(factYBY, YBV)
|
| 101 |
+
blockVectorV -= blockVectorY @ tmp
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None,
                      verbosityLevel=0):
    """in-place B-orthonormalize the given block vector using Cholesky."""
    # Compute B @ V if not supplied.  A failing B call is a soft error:
    # the (None, None, None) triple tells the caller to recover/restart.
    if blockVectorBV is None:
        if B is None:
            blockVectorBV = blockVectorV
        else:
            try:
                blockVectorBV = B(blockVectorV)
            except Exception as e:
                if verbosityLevel:
                    warnings.warn(
                        f"Secondary MatMul call failed with error\n"
                        f"{e}\n",
                        UserWarning, stacklevel=3
                    )
                return None, None, None
        if blockVectorBV.shape != blockVectorV.shape:
            raise ValueError(
                f"The shape {blockVectorV.shape} "
                f"of the orthogonalized matrix not preserved\n"
                f"and changed to {blockVectorBV.shape} "
                f"after multiplying by the secondary matrix.\n"
            )

    # Gram matrix V^H B V; its inverse Cholesky factor B-orthonormalizes V.
    VBV = blockVectorV.T.conj() @ blockVectorBV
    try:
        # VBV is a Cholesky factor from now on...
        VBV = cholesky(VBV, overwrite_a=True)
        VBV = inv(VBV, overwrite_a=True)
        blockVectorV = _matmul_inplace(
            blockVectorV, VBV,
            verbosityLevel=verbosityLevel
        )
        if B is not None:
            blockVectorBV = _matmul_inplace(
                blockVectorBV, VBV,
                verbosityLevel=verbosityLevel
            )
        return blockVectorV, blockVectorBV, VBV
    except LinAlgError:
        # Cholesky fails when V is (numerically) B-rank-deficient; again
        # signal a soft failure with the None triple.
        if verbosityLevel:
            warnings.warn(
                "Cholesky has failed.",
                UserWarning, stacklevel=3
            )
        return None, None, None
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def _get_indx(_lambda, num, largest):
|
| 154 |
+
"""Get `num` indices into `_lambda` depending on `largest` option."""
|
| 155 |
+
ii = np.argsort(_lambda)
|
| 156 |
+
if largest:
|
| 157 |
+
ii = ii[:-num - 1:-1]
|
| 158 |
+
else:
|
| 159 |
+
ii = ii[:num]
|
| 160 |
+
|
| 161 |
+
return ii
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel):
|
| 165 |
+
if verbosityLevel:
|
| 166 |
+
_report_nonhermitian(gramA, "gramA")
|
| 167 |
+
_report_nonhermitian(gramB, "gramB")
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def lobpcg(
|
| 171 |
+
A,
|
| 172 |
+
X,
|
| 173 |
+
B=None,
|
| 174 |
+
M=None,
|
| 175 |
+
Y=None,
|
| 176 |
+
tol=None,
|
| 177 |
+
maxiter=None,
|
| 178 |
+
largest=True,
|
| 179 |
+
verbosityLevel=0,
|
| 180 |
+
retLambdaHistory=False,
|
| 181 |
+
retResidualNormsHistory=False,
|
| 182 |
+
restartControl=20,
|
| 183 |
+
):
|
| 184 |
+
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
|
| 185 |
+
|
| 186 |
+
LOBPCG is a preconditioned eigensolver for large real symmetric and complex
|
| 187 |
+
Hermitian definite generalized eigenproblems.
|
| 188 |
+
|
| 189 |
+
Parameters
|
| 190 |
+
----------
|
| 191 |
+
A : {sparse matrix, ndarray, LinearOperator, callable object}
|
| 192 |
+
The Hermitian linear operator of the problem, usually given by a
|
| 193 |
+
sparse matrix. Often called the "stiffness matrix".
|
| 194 |
+
X : ndarray, float32 or float64
|
| 195 |
+
Initial approximation to the ``k`` eigenvectors (non-sparse).
|
| 196 |
+
If `A` has ``shape=(n,n)`` then `X` must have ``shape=(n,k)``.
|
| 197 |
+
B : {sparse matrix, ndarray, LinearOperator, callable object}
|
| 198 |
+
Optional. By default ``B = None``, which is equivalent to identity.
|
| 199 |
+
The right hand side operator in a generalized eigenproblem if present.
|
| 200 |
+
Often called the "mass matrix". Must be Hermitian positive definite.
|
| 201 |
+
M : {sparse matrix, ndarray, LinearOperator, callable object}
|
| 202 |
+
Optional. By default ``M = None``, which is equivalent to identity.
|
| 203 |
+
Preconditioner aiming to accelerate convergence.
|
| 204 |
+
Y : ndarray, float32 or float64, default: None
|
| 205 |
+
An ``n-by-sizeY`` ndarray of constraints with ``sizeY < n``.
|
| 206 |
+
The iterations will be performed in the ``B``-orthogonal complement
|
| 207 |
+
of the column-space of `Y`. `Y` must be full rank if present.
|
| 208 |
+
tol : scalar, optional
|
| 209 |
+
The default is ``tol=n*sqrt(eps)``.
|
| 210 |
+
Solver tolerance for the stopping criterion.
|
| 211 |
+
maxiter : int, default: 20
|
| 212 |
+
Maximum number of iterations.
|
| 213 |
+
largest : bool, default: True
|
| 214 |
+
When True, solve for the largest eigenvalues, otherwise the smallest.
|
| 215 |
+
verbosityLevel : int, optional
|
| 216 |
+
By default ``verbosityLevel=0`` no output.
|
| 217 |
+
Controls the solver standard/screen output.
|
| 218 |
+
retLambdaHistory : bool, default: False
|
| 219 |
+
Whether to return iterative eigenvalue history.
|
| 220 |
+
retResidualNormsHistory : bool, default: False
|
| 221 |
+
Whether to return iterative history of residual norms.
|
| 222 |
+
restartControl : int, optional.
|
| 223 |
+
Iterations restart if the residuals jump ``2**restartControl`` times
|
| 224 |
+
compared to the smallest recorded in ``retResidualNormsHistory``.
|
| 225 |
+
The default is ``restartControl=20``, making the restarts rare for
|
| 226 |
+
backward compatibility.
|
| 227 |
+
|
| 228 |
+
Returns
|
| 229 |
+
-------
|
| 230 |
+
lambda : ndarray of the shape ``(k, )``.
|
| 231 |
+
Array of ``k`` approximate eigenvalues.
|
| 232 |
+
v : ndarray of the same shape as ``X.shape``.
|
| 233 |
+
An array of ``k`` approximate eigenvectors.
|
| 234 |
+
lambdaHistory : ndarray, optional.
|
| 235 |
+
The eigenvalue history, if `retLambdaHistory` is ``True``.
|
| 236 |
+
ResidualNormsHistory : ndarray, optional.
|
| 237 |
+
The history of residual norms, if `retResidualNormsHistory`
|
| 238 |
+
is ``True``.
|
| 239 |
+
|
| 240 |
+
Notes
|
| 241 |
+
-----
|
| 242 |
+
The iterative loop runs ``maxit=maxiter`` (20 if ``maxit=None``)
|
| 243 |
+
iterations at most and finishes earlier if the tolerance is met.
|
| 244 |
+
Breaking backward compatibility with the previous version, LOBPCG
|
| 245 |
+
now returns the block of iterative vectors with the best accuracy rather
|
| 246 |
+
than the last one iterated, as a cure for possible divergence.
|
| 247 |
+
|
| 248 |
+
If ``X.dtype == np.float32`` and user-provided operations/multiplications
|
| 249 |
+
by `A`, `B`, and `M` all preserve the ``np.float32`` data type,
|
| 250 |
+
all the calculations and the output are in ``np.float32``.
|
| 251 |
+
|
| 252 |
+
The size of the iteration history output equals to the number of the best
|
| 253 |
+
(limited by `maxit`) iterations plus 3: initial, final, and postprocessing.
|
| 254 |
+
|
| 255 |
+
If both `retLambdaHistory` and `retResidualNormsHistory` are ``True``,
|
| 256 |
+
the return tuple has the following format
|
| 257 |
+
``(lambda, V, lambda history, residual norms history)``.
|
| 258 |
+
|
| 259 |
+
In the following ``n`` denotes the matrix size and ``k`` the number
|
| 260 |
+
of required eigenvalues (smallest or largest).
|
| 261 |
+
|
| 262 |
+
The LOBPCG code internally solves eigenproblems of the size ``3k`` on every
|
| 263 |
+
iteration by calling the dense eigensolver `eigh`, so if ``k`` is not
|
| 264 |
+
small enough compared to ``n``, it makes no sense to call the LOBPCG code.
|
| 265 |
+
Moreover, if one calls the LOBPCG algorithm for ``5k > n``, it would likely
|
| 266 |
+
break internally, so the code calls the standard function `eigh` instead.
|
| 267 |
+
It is not that ``n`` should be large for the LOBPCG to work, but rather the
|
| 268 |
+
ratio ``n / k`` should be large. It you call LOBPCG with ``k=1``
|
| 269 |
+
and ``n=10``, it works though ``n`` is small. The method is intended
|
| 270 |
+
for extremely large ``n / k``.
|
| 271 |
+
|
| 272 |
+
The convergence speed depends basically on three factors:
|
| 273 |
+
|
| 274 |
+
1. Quality of the initial approximations `X` to the seeking eigenvectors.
|
| 275 |
+
Randomly distributed around the origin vectors work well if no better
|
| 276 |
+
choice is known.
|
| 277 |
+
|
| 278 |
+
2. Relative separation of the desired eigenvalues from the rest
|
| 279 |
+
of the eigenvalues. One can vary ``k`` to improve the separation.
|
| 280 |
+
|
| 281 |
+
3. Proper preconditioning to shrink the spectral spread.
|
| 282 |
+
For example, a rod vibration test problem (under tests
|
| 283 |
+
directory) is ill-conditioned for large ``n``, so convergence will be
|
| 284 |
+
slow, unless efficient preconditioning is used. For this specific
|
| 285 |
+
problem, a good simple preconditioner function would be a linear solve
|
| 286 |
+
for `A`, which is easy to code since `A` is tridiagonal.
|
| 287 |
+
|
| 288 |
+
References
|
| 289 |
+
----------
|
| 290 |
+
.. [1] A. V. Knyazev (2001),
|
| 291 |
+
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
|
| 292 |
+
Block Preconditioned Conjugate Gradient Method.
|
| 293 |
+
SIAM Journal on Scientific Computing 23, no. 2,
|
| 294 |
+
pp. 517-541. :doi:`10.1137/S1064827500366124`
|
| 295 |
+
|
| 296 |
+
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
|
| 297 |
+
(2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
|
| 298 |
+
(BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
|
| 299 |
+
|
| 300 |
+
.. [3] A. V. Knyazev's C and MATLAB implementations:
|
| 301 |
+
https://github.com/lobpcg/blopex
|
| 302 |
+
|
| 303 |
+
Examples
|
| 304 |
+
--------
|
| 305 |
+
Our first example is minimalistic - find the largest eigenvalue of
|
| 306 |
+
a diagonal matrix by solving the non-generalized eigenvalue problem
|
| 307 |
+
``A x = lambda x`` without constraints or preconditioning.
|
| 308 |
+
|
| 309 |
+
>>> import numpy as np
|
| 310 |
+
>>> from scipy.sparse import spdiags
|
| 311 |
+
>>> from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
| 312 |
+
>>> from scipy.sparse.linalg import lobpcg
|
| 313 |
+
|
| 314 |
+
The square matrix size is
|
| 315 |
+
|
| 316 |
+
>>> n = 100
|
| 317 |
+
|
| 318 |
+
and its diagonal entries are 1, ..., 100 defined by
|
| 319 |
+
|
| 320 |
+
>>> vals = np.arange(1, n + 1).astype(np.int16)
|
| 321 |
+
|
| 322 |
+
The first mandatory input parameter in this test is
|
| 323 |
+
the sparse diagonal matrix `A`
|
| 324 |
+
of the eigenvalue problem ``A x = lambda x`` to solve.
|
| 325 |
+
|
| 326 |
+
>>> A = spdiags(vals, 0, n, n)
|
| 327 |
+
>>> A = A.astype(np.int16)
|
| 328 |
+
>>> A.toarray()
|
| 329 |
+
array([[ 1, 0, 0, ..., 0, 0, 0],
|
| 330 |
+
[ 0, 2, 0, ..., 0, 0, 0],
|
| 331 |
+
[ 0, 0, 3, ..., 0, 0, 0],
|
| 332 |
+
...,
|
| 333 |
+
[ 0, 0, 0, ..., 98, 0, 0],
|
| 334 |
+
[ 0, 0, 0, ..., 0, 99, 0],
|
| 335 |
+
[ 0, 0, 0, ..., 0, 0, 100]], dtype=int16)
|
| 336 |
+
|
| 337 |
+
The second mandatory input parameter `X` is a 2D array with the
|
| 338 |
+
row dimension determining the number of requested eigenvalues.
|
| 339 |
+
`X` is an initial guess for targeted eigenvectors.
|
| 340 |
+
`X` must have linearly independent columns.
|
| 341 |
+
If no initial approximations available, randomly oriented vectors
|
| 342 |
+
commonly work best, e.g., with components normally distributed
|
| 343 |
+
around zero or uniformly distributed on the interval [-1 1].
|
| 344 |
+
Setting the initial approximations to dtype ``np.float32``
|
| 345 |
+
forces all iterative values to dtype ``np.float32`` speeding up
|
| 346 |
+
the run while still allowing accurate eigenvalue computations.
|
| 347 |
+
|
| 348 |
+
>>> k = 1
|
| 349 |
+
>>> rng = np.random.default_rng()
|
| 350 |
+
>>> X = rng.normal(size=(n, k))
|
| 351 |
+
>>> X = X.astype(np.float32)
|
| 352 |
+
|
| 353 |
+
>>> eigenvalues, _ = lobpcg(A, X, maxiter=60)
|
| 354 |
+
>>> eigenvalues
|
| 355 |
+
array([100.])
|
| 356 |
+
>>> eigenvalues.dtype
|
| 357 |
+
dtype('float32')
|
| 358 |
+
|
| 359 |
+
`lobpcg` needs only access the matrix product with `A` rather
|
| 360 |
+
then the matrix itself. Since the matrix `A` is diagonal in
|
| 361 |
+
this example, one can write a function of the matrix product
|
| 362 |
+
``A @ X`` using the diagonal values ``vals`` only, e.g., by
|
| 363 |
+
element-wise multiplication with broadcasting in the lambda-function
|
| 364 |
+
|
| 365 |
+
>>> A_lambda = lambda X: vals[:, np.newaxis] * X
|
| 366 |
+
|
| 367 |
+
or the regular function
|
| 368 |
+
|
| 369 |
+
>>> def A_matmat(X):
|
| 370 |
+
... return vals[:, np.newaxis] * X
|
| 371 |
+
|
| 372 |
+
and use the handle to one of these callables as an input
|
| 373 |
+
|
| 374 |
+
>>> eigenvalues, _ = lobpcg(A_lambda, X, maxiter=60)
|
| 375 |
+
>>> eigenvalues
|
| 376 |
+
array([100.])
|
| 377 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, maxiter=60)
|
| 378 |
+
>>> eigenvalues
|
| 379 |
+
array([100.])
|
| 380 |
+
|
| 381 |
+
The traditional callable `LinearOperator` is no longer
|
| 382 |
+
necessary but still supported as the input to `lobpcg`.
|
| 383 |
+
Specifying ``matmat=A_matmat`` explicitly improves performance.
|
| 384 |
+
|
| 385 |
+
>>> A_lo = LinearOperator((n, n), matvec=A_matmat, matmat=A_matmat, dtype=np.int16)
|
| 386 |
+
>>> eigenvalues, _ = lobpcg(A_lo, X, maxiter=80)
|
| 387 |
+
>>> eigenvalues
|
| 388 |
+
array([100.])
|
| 389 |
+
|
| 390 |
+
The least efficient callable option is `aslinearoperator`:
|
| 391 |
+
|
| 392 |
+
>>> eigenvalues, _ = lobpcg(aslinearoperator(A), X, maxiter=80)
|
| 393 |
+
>>> eigenvalues
|
| 394 |
+
array([100.])
|
| 395 |
+
|
| 396 |
+
We now switch to computing the three smallest eigenvalues specifying
|
| 397 |
+
|
| 398 |
+
>>> k = 3
|
| 399 |
+
>>> X = np.random.default_rng().normal(size=(n, k))
|
| 400 |
+
|
| 401 |
+
and ``largest=False`` parameter
|
| 402 |
+
|
| 403 |
+
>>> eigenvalues, _ = lobpcg(A, X, largest=False, maxiter=80)
|
| 404 |
+
>>> print(eigenvalues)
|
| 405 |
+
[1. 2. 3.]
|
| 406 |
+
|
| 407 |
+
The next example illustrates computing 3 smallest eigenvalues of
|
| 408 |
+
the same matrix `A` given by the function handle ``A_matmat`` but
|
| 409 |
+
with constraints and preconditioning.
|
| 410 |
+
|
| 411 |
+
Constraints - an optional input parameter is a 2D array comprising
|
| 412 |
+
of column vectors that the eigenvectors must be orthogonal to
|
| 413 |
+
|
| 414 |
+
>>> Y = np.eye(n, 3)
|
| 415 |
+
|
| 416 |
+
The preconditioner acts as the inverse of `A` in this example, but
|
| 417 |
+
in the reduced precision ``np.float32`` even though the initial `X`
|
| 418 |
+
and thus all iterates and the output are in full ``np.float64``.
|
| 419 |
+
|
| 420 |
+
>>> inv_vals = 1./vals
|
| 421 |
+
>>> inv_vals = inv_vals.astype(np.float32)
|
| 422 |
+
>>> M = lambda X: inv_vals[:, np.newaxis] * X
|
| 423 |
+
|
| 424 |
+
Let us now solve the eigenvalue problem for the matrix `A` first
|
| 425 |
+
without preconditioning requesting 80 iterations
|
| 426 |
+
|
| 427 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, Y=Y, largest=False, maxiter=80)
|
| 428 |
+
>>> eigenvalues
|
| 429 |
+
array([4., 5., 6.])
|
| 430 |
+
>>> eigenvalues.dtype
|
| 431 |
+
dtype('float64')
|
| 432 |
+
|
| 433 |
+
With preconditioning we need only 20 iterations from the same `X`
|
| 434 |
+
|
| 435 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, Y=Y, M=M, largest=False, maxiter=20)
|
| 436 |
+
>>> eigenvalues
|
| 437 |
+
array([4., 5., 6.])
|
| 438 |
+
|
| 439 |
+
Note that the vectors passed in `Y` are the eigenvectors of the 3
|
| 440 |
+
smallest eigenvalues. The results returned above are orthogonal to those.
|
| 441 |
+
|
| 442 |
+
The primary matrix `A` may be indefinite, e.g., after shifting
|
| 443 |
+
``vals`` by 50 from 1, ..., 100 to -49, ..., 50, we still can compute
|
| 444 |
+
the 3 smallest or largest eigenvalues.
|
| 445 |
+
|
| 446 |
+
>>> vals = vals - 50
|
| 447 |
+
>>> X = rng.normal(size=(n, k))
|
| 448 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, largest=False, maxiter=99)
|
| 449 |
+
>>> eigenvalues
|
| 450 |
+
array([-49., -48., -47.])
|
| 451 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, largest=True, maxiter=99)
|
| 452 |
+
>>> eigenvalues
|
| 453 |
+
array([50., 49., 48.])
|
| 454 |
+
|
| 455 |
+
"""
|
| 456 |
+
blockVectorX = X
|
| 457 |
+
bestblockVectorX = blockVectorX
|
| 458 |
+
blockVectorY = Y
|
| 459 |
+
residualTolerance = tol
|
| 460 |
+
if maxiter is None:
|
| 461 |
+
maxiter = 20
|
| 462 |
+
|
| 463 |
+
bestIterationNumber = maxiter
|
| 464 |
+
|
| 465 |
+
sizeY = 0
|
| 466 |
+
if blockVectorY is not None:
|
| 467 |
+
if len(blockVectorY.shape) != 2:
|
| 468 |
+
warnings.warn(
|
| 469 |
+
f"Expected rank-2 array for argument Y, instead got "
|
| 470 |
+
f"{len(blockVectorY.shape)}, "
|
| 471 |
+
f"so ignore it and use no constraints.",
|
| 472 |
+
UserWarning, stacklevel=2
|
| 473 |
+
)
|
| 474 |
+
blockVectorY = None
|
| 475 |
+
else:
|
| 476 |
+
sizeY = blockVectorY.shape[1]
|
| 477 |
+
|
| 478 |
+
# Block size.
|
| 479 |
+
if blockVectorX is None:
|
| 480 |
+
raise ValueError("The mandatory initial matrix X cannot be None")
|
| 481 |
+
if len(blockVectorX.shape) != 2:
|
| 482 |
+
raise ValueError("expected rank-2 array for argument X")
|
| 483 |
+
|
| 484 |
+
n, sizeX = blockVectorX.shape
|
| 485 |
+
|
| 486 |
+
# Data type of iterates, determined by X, must be inexact
|
| 487 |
+
if not np.issubdtype(blockVectorX.dtype, np.inexact):
|
| 488 |
+
warnings.warn(
|
| 489 |
+
f"Data type for argument X is {blockVectorX.dtype}, "
|
| 490 |
+
f"which is not inexact, so casted to np.float32.",
|
| 491 |
+
UserWarning, stacklevel=2
|
| 492 |
+
)
|
| 493 |
+
blockVectorX = np.asarray(blockVectorX, dtype=np.float32)
|
| 494 |
+
|
| 495 |
+
if retLambdaHistory:
|
| 496 |
+
lambdaHistory = np.zeros((maxiter + 3, sizeX),
|
| 497 |
+
dtype=blockVectorX.dtype)
|
| 498 |
+
if retResidualNormsHistory:
|
| 499 |
+
residualNormsHistory = np.zeros((maxiter + 3, sizeX),
|
| 500 |
+
dtype=blockVectorX.dtype)
|
| 501 |
+
|
| 502 |
+
if verbosityLevel:
|
| 503 |
+
aux = "Solving "
|
| 504 |
+
if B is None:
|
| 505 |
+
aux += "standard"
|
| 506 |
+
else:
|
| 507 |
+
aux += "generalized"
|
| 508 |
+
aux += " eigenvalue problem with"
|
| 509 |
+
if M is None:
|
| 510 |
+
aux += "out"
|
| 511 |
+
aux += " preconditioning\n\n"
|
| 512 |
+
aux += "matrix size %d\n" % n
|
| 513 |
+
aux += "block size %d\n\n" % sizeX
|
| 514 |
+
if blockVectorY is None:
|
| 515 |
+
aux += "No constraints\n\n"
|
| 516 |
+
else:
|
| 517 |
+
if sizeY > 1:
|
| 518 |
+
aux += "%d constraints\n\n" % sizeY
|
| 519 |
+
else:
|
| 520 |
+
aux += "%d constraint\n\n" % sizeY
|
| 521 |
+
print(aux)
|
| 522 |
+
|
| 523 |
+
if (n - sizeY) < (5 * sizeX):
|
| 524 |
+
warnings.warn(
|
| 525 |
+
f"The problem size {n} minus the constraints size {sizeY} "
|
| 526 |
+
f"is too small relative to the block size {sizeX}. "
|
| 527 |
+
f"Using a dense eigensolver instead of LOBPCG iterations."
|
| 528 |
+
f"No output of the history of the iterations.",
|
| 529 |
+
UserWarning, stacklevel=2
|
| 530 |
+
)
|
| 531 |
+
|
| 532 |
+
sizeX = min(sizeX, n)
|
| 533 |
+
|
| 534 |
+
if blockVectorY is not None:
|
| 535 |
+
raise NotImplementedError(
|
| 536 |
+
"The dense eigensolver does not support constraints."
|
| 537 |
+
)
|
| 538 |
+
|
| 539 |
+
# Define the closed range of indices of eigenvalues to return.
|
| 540 |
+
if largest:
|
| 541 |
+
eigvals = (n - sizeX, n - 1)
|
| 542 |
+
else:
|
| 543 |
+
eigvals = (0, sizeX - 1)
|
| 544 |
+
|
| 545 |
+
try:
|
| 546 |
+
if isinstance(A, LinearOperator):
|
| 547 |
+
A = A(np.eye(n, dtype=int))
|
| 548 |
+
elif callable(A):
|
| 549 |
+
A = A(np.eye(n, dtype=int))
|
| 550 |
+
if A.shape != (n, n):
|
| 551 |
+
raise ValueError(
|
| 552 |
+
f"The shape {A.shape} of the primary matrix\n"
|
| 553 |
+
f"defined by a callable object is wrong.\n"
|
| 554 |
+
)
|
| 555 |
+
elif issparse(A):
|
| 556 |
+
A = A.toarray()
|
| 557 |
+
else:
|
| 558 |
+
A = np.asarray(A)
|
| 559 |
+
except Exception as e:
|
| 560 |
+
raise Exception(
|
| 561 |
+
f"Primary MatMul call failed with error\n"
|
| 562 |
+
f"{e}\n")
|
| 563 |
+
|
| 564 |
+
if B is not None:
|
| 565 |
+
try:
|
| 566 |
+
if isinstance(B, LinearOperator):
|
| 567 |
+
B = B(np.eye(n, dtype=int))
|
| 568 |
+
elif callable(B):
|
| 569 |
+
B = B(np.eye(n, dtype=int))
|
| 570 |
+
if B.shape != (n, n):
|
| 571 |
+
raise ValueError(
|
| 572 |
+
f"The shape {B.shape} of the secondary matrix\n"
|
| 573 |
+
f"defined by a callable object is wrong.\n"
|
| 574 |
+
)
|
| 575 |
+
elif issparse(B):
|
| 576 |
+
B = B.toarray()
|
| 577 |
+
else:
|
| 578 |
+
B = np.asarray(B)
|
| 579 |
+
except Exception as e:
|
| 580 |
+
raise Exception(
|
| 581 |
+
f"Secondary MatMul call failed with error\n"
|
| 582 |
+
f"{e}\n")
|
| 583 |
+
|
| 584 |
+
try:
|
| 585 |
+
vals, vecs = eigh(A,
|
| 586 |
+
B,
|
| 587 |
+
subset_by_index=eigvals,
|
| 588 |
+
check_finite=False)
|
| 589 |
+
if largest:
|
| 590 |
+
# Reverse order to be compatible with eigs() in 'LM' mode.
|
| 591 |
+
vals = vals[::-1]
|
| 592 |
+
vecs = vecs[:, ::-1]
|
| 593 |
+
|
| 594 |
+
return vals, vecs
|
| 595 |
+
except Exception as e:
|
| 596 |
+
raise Exception(
|
| 597 |
+
f"Dense eigensolver failed with error\n"
|
| 598 |
+
f"{e}\n"
|
| 599 |
+
)
|
| 600 |
+
|
| 601 |
+
if (residualTolerance is None) or (residualTolerance <= 0.0):
|
| 602 |
+
residualTolerance = np.sqrt(np.finfo(blockVectorX.dtype).eps) * n
|
| 603 |
+
|
| 604 |
+
A = _makeMatMat(A)
|
| 605 |
+
B = _makeMatMat(B)
|
| 606 |
+
M = _makeMatMat(M)
|
| 607 |
+
|
| 608 |
+
# Apply constraints to X.
|
| 609 |
+
if blockVectorY is not None:
|
| 610 |
+
|
| 611 |
+
if B is not None:
|
| 612 |
+
blockVectorBY = B(blockVectorY)
|
| 613 |
+
if blockVectorBY.shape != blockVectorY.shape:
|
| 614 |
+
raise ValueError(
|
| 615 |
+
f"The shape {blockVectorY.shape} "
|
| 616 |
+
f"of the constraint not preserved\n"
|
| 617 |
+
f"and changed to {blockVectorBY.shape} "
|
| 618 |
+
f"after multiplying by the secondary matrix.\n"
|
| 619 |
+
)
|
| 620 |
+
else:
|
| 621 |
+
blockVectorBY = blockVectorY
|
| 622 |
+
|
| 623 |
+
# gramYBY is a dense array.
|
| 624 |
+
gramYBY = blockVectorY.T.conj() @ blockVectorBY
|
| 625 |
+
try:
|
| 626 |
+
# gramYBY is a Cholesky factor from now on...
|
| 627 |
+
gramYBY = cho_factor(gramYBY, overwrite_a=True)
|
| 628 |
+
except LinAlgError as e:
|
| 629 |
+
raise ValueError("Linearly dependent constraints") from e
|
| 630 |
+
|
| 631 |
+
_applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
|
| 632 |
+
|
| 633 |
+
##
|
| 634 |
+
# B-orthonormalize X.
|
| 635 |
+
blockVectorX, blockVectorBX, _ = _b_orthonormalize(
|
| 636 |
+
B, blockVectorX, verbosityLevel=verbosityLevel)
|
| 637 |
+
if blockVectorX is None:
|
| 638 |
+
raise ValueError("Linearly dependent initial approximations")
|
| 639 |
+
|
| 640 |
+
##
|
| 641 |
+
# Compute the initial Ritz vectors: solve the eigenproblem.
|
| 642 |
+
blockVectorAX = A(blockVectorX)
|
| 643 |
+
if blockVectorAX.shape != blockVectorX.shape:
|
| 644 |
+
raise ValueError(
|
| 645 |
+
f"The shape {blockVectorX.shape} "
|
| 646 |
+
f"of the initial approximations not preserved\n"
|
| 647 |
+
f"and changed to {blockVectorAX.shape} "
|
| 648 |
+
f"after multiplying by the primary matrix.\n"
|
| 649 |
+
)
|
| 650 |
+
|
| 651 |
+
gramXAX = blockVectorX.T.conj() @ blockVectorAX
|
| 652 |
+
|
| 653 |
+
_lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
|
| 654 |
+
ii = _get_indx(_lambda, sizeX, largest)
|
| 655 |
+
_lambda = _lambda[ii]
|
| 656 |
+
if retLambdaHistory:
|
| 657 |
+
lambdaHistory[0, :] = _lambda
|
| 658 |
+
|
| 659 |
+
eigBlockVector = np.asarray(eigBlockVector[:, ii])
|
| 660 |
+
blockVectorX = _matmul_inplace(
|
| 661 |
+
blockVectorX, eigBlockVector,
|
| 662 |
+
verbosityLevel=verbosityLevel
|
| 663 |
+
)
|
| 664 |
+
blockVectorAX = _matmul_inplace(
|
| 665 |
+
blockVectorAX, eigBlockVector,
|
| 666 |
+
verbosityLevel=verbosityLevel
|
| 667 |
+
)
|
| 668 |
+
if B is not None:
|
| 669 |
+
blockVectorBX = _matmul_inplace(
|
| 670 |
+
blockVectorBX, eigBlockVector,
|
| 671 |
+
verbosityLevel=verbosityLevel
|
| 672 |
+
)
|
| 673 |
+
|
| 674 |
+
##
|
| 675 |
+
# Active index set.
|
| 676 |
+
activeMask = np.ones((sizeX,), dtype=bool)
|
| 677 |
+
|
| 678 |
+
##
|
| 679 |
+
# Main iteration loop.
|
| 680 |
+
|
| 681 |
+
blockVectorP = None # set during iteration
|
| 682 |
+
blockVectorAP = None
|
| 683 |
+
blockVectorBP = None
|
| 684 |
+
|
| 685 |
+
smallestResidualNorm = np.abs(np.finfo(blockVectorX.dtype).max)
|
| 686 |
+
|
| 687 |
+
iterationNumber = -1
|
| 688 |
+
restart = True
|
| 689 |
+
forcedRestart = False
|
| 690 |
+
explicitGramFlag = False
|
| 691 |
+
while iterationNumber < maxiter:
|
| 692 |
+
iterationNumber += 1
|
| 693 |
+
|
| 694 |
+
if B is not None:
|
| 695 |
+
aux = blockVectorBX * _lambda[np.newaxis, :]
|
| 696 |
+
else:
|
| 697 |
+
aux = blockVectorX * _lambda[np.newaxis, :]
|
| 698 |
+
|
| 699 |
+
blockVectorR = blockVectorAX - aux
|
| 700 |
+
|
| 701 |
+
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
|
| 702 |
+
residualNorms = np.sqrt(np.abs(aux))
|
| 703 |
+
if retResidualNormsHistory:
|
| 704 |
+
residualNormsHistory[iterationNumber, :] = residualNorms
|
| 705 |
+
residualNorm = np.sum(np.abs(residualNorms)) / sizeX
|
| 706 |
+
|
| 707 |
+
if residualNorm < smallestResidualNorm:
|
| 708 |
+
smallestResidualNorm = residualNorm
|
| 709 |
+
bestIterationNumber = iterationNumber
|
| 710 |
+
bestblockVectorX = blockVectorX
|
| 711 |
+
elif residualNorm > 2**restartControl * smallestResidualNorm:
|
| 712 |
+
forcedRestart = True
|
| 713 |
+
blockVectorAX = A(blockVectorX)
|
| 714 |
+
if blockVectorAX.shape != blockVectorX.shape:
|
| 715 |
+
raise ValueError(
|
| 716 |
+
f"The shape {blockVectorX.shape} "
|
| 717 |
+
f"of the restarted iterate not preserved\n"
|
| 718 |
+
f"and changed to {blockVectorAX.shape} "
|
| 719 |
+
f"after multiplying by the primary matrix.\n"
|
| 720 |
+
)
|
| 721 |
+
if B is not None:
|
| 722 |
+
blockVectorBX = B(blockVectorX)
|
| 723 |
+
if blockVectorBX.shape != blockVectorX.shape:
|
| 724 |
+
raise ValueError(
|
| 725 |
+
f"The shape {blockVectorX.shape} "
|
| 726 |
+
f"of the restarted iterate not preserved\n"
|
| 727 |
+
f"and changed to {blockVectorBX.shape} "
|
| 728 |
+
f"after multiplying by the secondary matrix.\n"
|
| 729 |
+
)
|
| 730 |
+
|
| 731 |
+
ii = np.where(residualNorms > residualTolerance, True, False)
|
| 732 |
+
activeMask = activeMask & ii
|
| 733 |
+
currentBlockSize = activeMask.sum()
|
| 734 |
+
|
| 735 |
+
if verbosityLevel:
|
| 736 |
+
print(f"iteration {iterationNumber}")
|
| 737 |
+
print(f"current block size: {currentBlockSize}")
|
| 738 |
+
print(f"eigenvalue(s):\n{_lambda}")
|
| 739 |
+
print(f"residual norm(s):\n{residualNorms}")
|
| 740 |
+
|
| 741 |
+
if currentBlockSize == 0:
|
| 742 |
+
break
|
| 743 |
+
|
| 744 |
+
activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
|
| 745 |
+
|
| 746 |
+
if iterationNumber > 0:
|
| 747 |
+
activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
|
| 748 |
+
activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
|
| 749 |
+
if B is not None:
|
| 750 |
+
activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
|
| 751 |
+
|
| 752 |
+
if M is not None:
|
| 753 |
+
# Apply preconditioner T to the active residuals.
|
| 754 |
+
activeBlockVectorR = M(activeBlockVectorR)
|
| 755 |
+
|
| 756 |
+
##
|
| 757 |
+
# Apply constraints to the preconditioned residuals.
|
| 758 |
+
if blockVectorY is not None:
|
| 759 |
+
_applyConstraints(activeBlockVectorR,
|
| 760 |
+
gramYBY,
|
| 761 |
+
blockVectorBY,
|
| 762 |
+
blockVectorY)
|
| 763 |
+
|
| 764 |
+
##
|
| 765 |
+
# B-orthogonalize the preconditioned residuals to X.
|
| 766 |
+
if B is not None:
|
| 767 |
+
activeBlockVectorR = activeBlockVectorR - (
|
| 768 |
+
blockVectorX @
|
| 769 |
+
(blockVectorBX.T.conj() @ activeBlockVectorR)
|
| 770 |
+
)
|
| 771 |
+
else:
|
| 772 |
+
activeBlockVectorR = activeBlockVectorR - (
|
| 773 |
+
blockVectorX @
|
| 774 |
+
(blockVectorX.T.conj() @ activeBlockVectorR)
|
| 775 |
+
)
|
| 776 |
+
|
| 777 |
+
##
|
| 778 |
+
# B-orthonormalize the preconditioned residuals.
|
| 779 |
+
aux = _b_orthonormalize(
|
| 780 |
+
B, activeBlockVectorR, verbosityLevel=verbosityLevel)
|
| 781 |
+
activeBlockVectorR, activeBlockVectorBR, _ = aux
|
| 782 |
+
|
| 783 |
+
if activeBlockVectorR is None:
|
| 784 |
+
warnings.warn(
|
| 785 |
+
f"Failed at iteration {iterationNumber} with accuracies "
|
| 786 |
+
f"{residualNorms}\n not reaching the requested "
|
| 787 |
+
f"tolerance {residualTolerance}.",
|
| 788 |
+
UserWarning, stacklevel=2
|
| 789 |
+
)
|
| 790 |
+
break
|
| 791 |
+
activeBlockVectorAR = A(activeBlockVectorR)
|
| 792 |
+
|
| 793 |
+
if iterationNumber > 0:
|
| 794 |
+
if B is not None:
|
| 795 |
+
aux = _b_orthonormalize(
|
| 796 |
+
B, activeBlockVectorP, activeBlockVectorBP,
|
| 797 |
+
verbosityLevel=verbosityLevel
|
| 798 |
+
)
|
| 799 |
+
activeBlockVectorP, activeBlockVectorBP, invR = aux
|
| 800 |
+
else:
|
| 801 |
+
aux = _b_orthonormalize(B, activeBlockVectorP,
|
| 802 |
+
verbosityLevel=verbosityLevel)
|
| 803 |
+
activeBlockVectorP, _, invR = aux
|
| 804 |
+
# Function _b_orthonormalize returns None if Cholesky fails
|
| 805 |
+
if activeBlockVectorP is not None:
|
| 806 |
+
activeBlockVectorAP = _matmul_inplace(
|
| 807 |
+
activeBlockVectorAP, invR,
|
| 808 |
+
verbosityLevel=verbosityLevel
|
| 809 |
+
)
|
| 810 |
+
restart = forcedRestart
|
| 811 |
+
else:
|
| 812 |
+
restart = True
|
| 813 |
+
|
| 814 |
+
##
|
| 815 |
+
# Perform the Rayleigh Ritz Procedure:
|
| 816 |
+
# Compute symmetric Gram matrices:
|
| 817 |
+
|
| 818 |
+
if activeBlockVectorAR.dtype == "float32":
|
| 819 |
+
myeps = 1
|
| 820 |
+
else:
|
| 821 |
+
myeps = np.sqrt(np.finfo(activeBlockVectorR.dtype).eps)
|
| 822 |
+
|
| 823 |
+
if residualNorms.max() > myeps and not explicitGramFlag:
|
| 824 |
+
explicitGramFlag = False
|
| 825 |
+
else:
|
| 826 |
+
# Once explicitGramFlag, forever explicitGramFlag.
|
| 827 |
+
explicitGramFlag = True
|
| 828 |
+
|
| 829 |
+
# Shared memory assignments to simplify the code
|
| 830 |
+
if B is None:
|
| 831 |
+
blockVectorBX = blockVectorX
|
| 832 |
+
activeBlockVectorBR = activeBlockVectorR
|
| 833 |
+
if not restart:
|
| 834 |
+
activeBlockVectorBP = activeBlockVectorP
|
| 835 |
+
|
| 836 |
+
# Common submatrices:
|
| 837 |
+
gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
|
| 838 |
+
gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
|
| 839 |
+
|
| 840 |
+
gramDtype = activeBlockVectorAR.dtype
|
| 841 |
+
if explicitGramFlag:
|
| 842 |
+
gramRAR = (gramRAR + gramRAR.T.conj()) / 2
|
| 843 |
+
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
|
| 844 |
+
gramXAX = (gramXAX + gramXAX.T.conj()) / 2
|
| 845 |
+
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
|
| 846 |
+
gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
|
| 847 |
+
gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
|
| 848 |
+
else:
|
| 849 |
+
gramXAX = np.diag(_lambda).astype(gramDtype)
|
| 850 |
+
gramXBX = np.eye(sizeX, dtype=gramDtype)
|
| 851 |
+
gramRBR = np.eye(currentBlockSize, dtype=gramDtype)
|
| 852 |
+
gramXBR = np.zeros((sizeX, currentBlockSize), dtype=gramDtype)
|
| 853 |
+
|
| 854 |
+
if not restart:
|
| 855 |
+
gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
|
| 856 |
+
gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
|
| 857 |
+
gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
|
| 858 |
+
gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
|
| 859 |
+
gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
|
| 860 |
+
if explicitGramFlag:
|
| 861 |
+
gramPAP = (gramPAP + gramPAP.T.conj()) / 2
|
| 862 |
+
gramPBP = np.dot(activeBlockVectorP.T.conj(),
|
| 863 |
+
activeBlockVectorBP)
|
| 864 |
+
else:
|
| 865 |
+
gramPBP = np.eye(currentBlockSize, dtype=gramDtype)
|
| 866 |
+
|
| 867 |
+
gramA = np.block(
|
| 868 |
+
[
|
| 869 |
+
[gramXAX, gramXAR, gramXAP],
|
| 870 |
+
[gramXAR.T.conj(), gramRAR, gramRAP],
|
| 871 |
+
[gramXAP.T.conj(), gramRAP.T.conj(), gramPAP],
|
| 872 |
+
]
|
| 873 |
+
)
|
| 874 |
+
gramB = np.block(
|
| 875 |
+
[
|
| 876 |
+
[gramXBX, gramXBR, gramXBP],
|
| 877 |
+
[gramXBR.T.conj(), gramRBR, gramRBP],
|
| 878 |
+
[gramXBP.T.conj(), gramRBP.T.conj(), gramPBP],
|
| 879 |
+
]
|
| 880 |
+
)
|
| 881 |
+
|
| 882 |
+
_handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
|
| 883 |
+
|
| 884 |
+
try:
|
| 885 |
+
_lambda, eigBlockVector = eigh(gramA,
|
| 886 |
+
gramB,
|
| 887 |
+
check_finite=False)
|
| 888 |
+
except LinAlgError as e:
|
| 889 |
+
# raise ValueError("eigh failed in lobpcg iterations") from e
|
| 890 |
+
if verbosityLevel:
|
| 891 |
+
warnings.warn(
|
| 892 |
+
f"eigh failed at iteration {iterationNumber} \n"
|
| 893 |
+
f"with error {e} causing a restart.\n",
|
| 894 |
+
UserWarning, stacklevel=2
|
| 895 |
+
)
|
| 896 |
+
# try again after dropping the direction vectors P from RR
|
| 897 |
+
restart = True
|
| 898 |
+
|
| 899 |
+
if restart:
|
| 900 |
+
gramA = np.block([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]])
|
| 901 |
+
gramB = np.block([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]])
|
| 902 |
+
|
| 903 |
+
_handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
|
| 904 |
+
|
| 905 |
+
try:
|
| 906 |
+
_lambda, eigBlockVector = eigh(gramA,
|
| 907 |
+
gramB,
|
| 908 |
+
check_finite=False)
|
| 909 |
+
except LinAlgError as e:
|
| 910 |
+
# raise ValueError("eigh failed in lobpcg iterations") from e
|
| 911 |
+
warnings.warn(
|
| 912 |
+
f"eigh failed at iteration {iterationNumber} with error\n"
|
| 913 |
+
f"{e}\n",
|
| 914 |
+
UserWarning, stacklevel=2
|
| 915 |
+
)
|
| 916 |
+
break
|
| 917 |
+
|
| 918 |
+
ii = _get_indx(_lambda, sizeX, largest)
|
| 919 |
+
_lambda = _lambda[ii]
|
| 920 |
+
eigBlockVector = eigBlockVector[:, ii]
|
| 921 |
+
if retLambdaHistory:
|
| 922 |
+
lambdaHistory[iterationNumber + 1, :] = _lambda
|
| 923 |
+
|
| 924 |
+
# Compute Ritz vectors.
|
| 925 |
+
if B is not None:
|
| 926 |
+
if not restart:
|
| 927 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
| 928 |
+
eigBlockVectorR = eigBlockVector[sizeX:
|
| 929 |
+
sizeX + currentBlockSize]
|
| 930 |
+
eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
|
| 931 |
+
|
| 932 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
| 933 |
+
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
|
| 934 |
+
|
| 935 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
| 936 |
+
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
|
| 937 |
+
|
| 938 |
+
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
|
| 939 |
+
bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
|
| 940 |
+
else:
|
| 941 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
| 942 |
+
eigBlockVectorR = eigBlockVector[sizeX:]
|
| 943 |
+
|
| 944 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
| 945 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
| 946 |
+
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
|
| 947 |
+
|
| 948 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
|
| 949 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
|
| 950 |
+
blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
|
| 951 |
+
|
| 952 |
+
blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
|
| 953 |
+
|
| 954 |
+
else:
|
| 955 |
+
if not restart:
|
| 956 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
| 957 |
+
eigBlockVectorR = eigBlockVector[sizeX:
|
| 958 |
+
sizeX + currentBlockSize]
|
| 959 |
+
eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
|
| 960 |
+
|
| 961 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
| 962 |
+
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
|
| 963 |
+
|
| 964 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
| 965 |
+
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
|
| 966 |
+
else:
|
| 967 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
| 968 |
+
eigBlockVectorR = eigBlockVector[sizeX:]
|
| 969 |
+
|
| 970 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
| 971 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
| 972 |
+
|
| 973 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
|
| 974 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
|
| 975 |
+
|
| 976 |
+
blockVectorP, blockVectorAP = pp, app
|
| 977 |
+
|
| 978 |
+
if B is not None:
|
| 979 |
+
aux = blockVectorBX * _lambda[np.newaxis, :]
|
| 980 |
+
else:
|
| 981 |
+
aux = blockVectorX * _lambda[np.newaxis, :]
|
| 982 |
+
|
| 983 |
+
blockVectorR = blockVectorAX - aux
|
| 984 |
+
|
| 985 |
+
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
|
| 986 |
+
residualNorms = np.sqrt(np.abs(aux))
|
| 987 |
+
# Use old lambda in case of early loop exit.
|
| 988 |
+
if retLambdaHistory:
|
| 989 |
+
lambdaHistory[iterationNumber + 1, :] = _lambda
|
| 990 |
+
if retResidualNormsHistory:
|
| 991 |
+
residualNormsHistory[iterationNumber + 1, :] = residualNorms
|
| 992 |
+
residualNorm = np.sum(np.abs(residualNorms)) / sizeX
|
| 993 |
+
if residualNorm < smallestResidualNorm:
|
| 994 |
+
smallestResidualNorm = residualNorm
|
| 995 |
+
bestIterationNumber = iterationNumber + 1
|
| 996 |
+
bestblockVectorX = blockVectorX
|
| 997 |
+
|
| 998 |
+
if np.max(np.abs(residualNorms)) > residualTolerance:
|
| 999 |
+
warnings.warn(
|
| 1000 |
+
f"Exited at iteration {iterationNumber} with accuracies \n"
|
| 1001 |
+
f"{residualNorms}\n"
|
| 1002 |
+
f"not reaching the requested tolerance {residualTolerance}.\n"
|
| 1003 |
+
f"Use iteration {bestIterationNumber} instead with accuracy \n"
|
| 1004 |
+
f"{smallestResidualNorm}.\n",
|
| 1005 |
+
UserWarning, stacklevel=2
|
| 1006 |
+
)
|
| 1007 |
+
|
| 1008 |
+
if verbosityLevel:
|
| 1009 |
+
print(f"Final iterative eigenvalue(s):\n{_lambda}")
|
| 1010 |
+
print(f"Final iterative residual norm(s):\n{residualNorms}")
|
| 1011 |
+
|
| 1012 |
+
blockVectorX = bestblockVectorX
|
| 1013 |
+
# Making eigenvectors "exactly" satisfy the blockVectorY constrains
|
| 1014 |
+
if blockVectorY is not None:
|
| 1015 |
+
_applyConstraints(blockVectorX,
|
| 1016 |
+
gramYBY,
|
| 1017 |
+
blockVectorBY,
|
| 1018 |
+
blockVectorY)
|
| 1019 |
+
|
| 1020 |
+
# Making eigenvectors "exactly" othonormalized by final "exact" RR
|
| 1021 |
+
blockVectorAX = A(blockVectorX)
|
| 1022 |
+
if blockVectorAX.shape != blockVectorX.shape:
|
| 1023 |
+
raise ValueError(
|
| 1024 |
+
f"The shape {blockVectorX.shape} "
|
| 1025 |
+
f"of the postprocessing iterate not preserved\n"
|
| 1026 |
+
f"and changed to {blockVectorAX.shape} "
|
| 1027 |
+
f"after multiplying by the primary matrix.\n"
|
| 1028 |
+
)
|
| 1029 |
+
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
|
| 1030 |
+
|
| 1031 |
+
blockVectorBX = blockVectorX
|
| 1032 |
+
if B is not None:
|
| 1033 |
+
blockVectorBX = B(blockVectorX)
|
| 1034 |
+
if blockVectorBX.shape != blockVectorX.shape:
|
| 1035 |
+
raise ValueError(
|
| 1036 |
+
f"The shape {blockVectorX.shape} "
|
| 1037 |
+
f"of the postprocessing iterate not preserved\n"
|
| 1038 |
+
f"and changed to {blockVectorBX.shape} "
|
| 1039 |
+
f"after multiplying by the secondary matrix.\n"
|
| 1040 |
+
)
|
| 1041 |
+
|
| 1042 |
+
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
|
| 1043 |
+
_handle_gramA_gramB_verbosity(gramXAX, gramXBX, verbosityLevel)
|
| 1044 |
+
gramXAX = (gramXAX + gramXAX.T.conj()) / 2
|
| 1045 |
+
gramXBX = (gramXBX + gramXBX.T.conj()) / 2
|
| 1046 |
+
try:
|
| 1047 |
+
_lambda, eigBlockVector = eigh(gramXAX,
|
| 1048 |
+
gramXBX,
|
| 1049 |
+
check_finite=False)
|
| 1050 |
+
except LinAlgError as e:
|
| 1051 |
+
raise ValueError("eigh has failed in lobpcg postprocessing") from e
|
| 1052 |
+
|
| 1053 |
+
ii = _get_indx(_lambda, sizeX, largest)
|
| 1054 |
+
_lambda = _lambda[ii]
|
| 1055 |
+
eigBlockVector = np.asarray(eigBlockVector[:, ii])
|
| 1056 |
+
|
| 1057 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVector)
|
| 1058 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
|
| 1059 |
+
|
| 1060 |
+
if B is not None:
|
| 1061 |
+
blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
|
| 1062 |
+
aux = blockVectorBX * _lambda[np.newaxis, :]
|
| 1063 |
+
else:
|
| 1064 |
+
aux = blockVectorX * _lambda[np.newaxis, :]
|
| 1065 |
+
|
| 1066 |
+
blockVectorR = blockVectorAX - aux
|
| 1067 |
+
|
| 1068 |
+
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
|
| 1069 |
+
residualNorms = np.sqrt(np.abs(aux))
|
| 1070 |
+
|
| 1071 |
+
if retLambdaHistory:
|
| 1072 |
+
lambdaHistory[bestIterationNumber + 1, :] = _lambda
|
| 1073 |
+
if retResidualNormsHistory:
|
| 1074 |
+
residualNormsHistory[bestIterationNumber + 1, :] = residualNorms
|
| 1075 |
+
|
| 1076 |
+
if retLambdaHistory:
|
| 1077 |
+
lambdaHistory = lambdaHistory[
|
| 1078 |
+
: bestIterationNumber + 2, :]
|
| 1079 |
+
if retResidualNormsHistory:
|
| 1080 |
+
residualNormsHistory = residualNormsHistory[
|
| 1081 |
+
: bestIterationNumber + 2, :]
|
| 1082 |
+
|
| 1083 |
+
if np.max(np.abs(residualNorms)) > residualTolerance:
|
| 1084 |
+
warnings.warn(
|
| 1085 |
+
f"Exited postprocessing with accuracies \n"
|
| 1086 |
+
f"{residualNorms}\n"
|
| 1087 |
+
f"not reaching the requested tolerance {residualTolerance}.",
|
| 1088 |
+
UserWarning, stacklevel=2
|
| 1089 |
+
)
|
| 1090 |
+
|
| 1091 |
+
if verbosityLevel:
|
| 1092 |
+
print(f"Final postprocessing eigenvalue(s):\n{_lambda}")
|
| 1093 |
+
print(f"Final residual norm(s):\n{residualNorms}")
|
| 1094 |
+
|
| 1095 |
+
if retLambdaHistory:
|
| 1096 |
+
lambdaHistory = np.vsplit(lambdaHistory, np.shape(lambdaHistory)[0])
|
| 1097 |
+
lambdaHistory = [np.squeeze(i) for i in lambdaHistory]
|
| 1098 |
+
if retResidualNormsHistory:
|
| 1099 |
+
residualNormsHistory = np.vsplit(residualNormsHistory,
|
| 1100 |
+
np.shape(residualNormsHistory)[0])
|
| 1101 |
+
residualNormsHistory = [np.squeeze(i) for i in residualNormsHistory]
|
| 1102 |
+
|
| 1103 |
+
if retLambdaHistory:
|
| 1104 |
+
if retResidualNormsHistory:
|
| 1105 |
+
return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
|
| 1106 |
+
else:
|
| 1107 |
+
return _lambda, blockVectorX, lambdaHistory
|
| 1108 |
+
else:
|
| 1109 |
+
if retResidualNormsHistory:
|
| 1110 |
+
return _lambda, blockVectorX, residualNormsHistory
|
| 1111 |
+
else:
|
| 1112 |
+
return _lambda, blockVectorX
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py
ADDED
|
File without changes
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py
ADDED
|
@@ -0,0 +1,645 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Test functions for the sparse.linalg._eigen.lobpcg module
|
| 2 |
+
"""
|
| 3 |
+
|
| 4 |
+
import itertools
|
| 5 |
+
import platform
|
| 6 |
+
import sys
|
| 7 |
+
import pytest
|
| 8 |
+
import numpy as np
|
| 9 |
+
from numpy import ones, r_, diag
|
| 10 |
+
from numpy.testing import (assert_almost_equal, assert_equal,
|
| 11 |
+
assert_allclose, assert_array_less)
|
| 12 |
+
|
| 13 |
+
from scipy import sparse
|
| 14 |
+
from scipy.linalg import eig, eigh, toeplitz, orth
|
| 15 |
+
from scipy.sparse import spdiags, diags, eye, csr_matrix
|
| 16 |
+
from scipy.sparse.linalg import eigs, LinearOperator
|
| 17 |
+
from scipy.sparse.linalg._eigen.lobpcg import lobpcg
|
| 18 |
+
from scipy.sparse.linalg._eigen.lobpcg.lobpcg import _b_orthonormalize
|
| 19 |
+
from scipy._lib._util import np_long, np_ulong
|
| 20 |
+
|
| 21 |
+
_IS_32BIT = (sys.maxsize < 2**32)
|
| 22 |
+
|
| 23 |
+
INT_DTYPES = {np.intc, np_long, np.longlong, np.uintc, np_ulong, np.ulonglong}
|
| 24 |
+
# np.half is unsupported on many test systems so excluded
|
| 25 |
+
REAL_DTYPES = {np.float32, np.float64, np.longdouble}
|
| 26 |
+
COMPLEX_DTYPES = {np.complex64, np.complex128, np.clongdouble}
|
| 27 |
+
# use sorted list to ensure fixed order of tests
|
| 28 |
+
VDTYPES = sorted(REAL_DTYPES ^ COMPLEX_DTYPES, key=str)
|
| 29 |
+
MDTYPES = sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def sign_align(A, B):
|
| 33 |
+
"""Align signs of columns of A match those of B: column-wise remove
|
| 34 |
+
sign of A by multiplying with its sign then multiply in sign of B.
|
| 35 |
+
"""
|
| 36 |
+
return np.array([col_A * np.sign(col_A[0]) * np.sign(col_B[0])
|
| 37 |
+
for col_A, col_B in zip(A.T, B.T)]).T
|
| 38 |
+
|
| 39 |
+
def ElasticRod(n):
|
| 40 |
+
"""Build the matrices for the generalized eigenvalue problem of the
|
| 41 |
+
fixed-free elastic rod vibration model.
|
| 42 |
+
"""
|
| 43 |
+
L = 1.0
|
| 44 |
+
le = L/n
|
| 45 |
+
rho = 7.85e3
|
| 46 |
+
S = 1.e-4
|
| 47 |
+
E = 2.1e11
|
| 48 |
+
mass = rho*S*le/6.
|
| 49 |
+
k = E*S/le
|
| 50 |
+
A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
|
| 51 |
+
B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
|
| 52 |
+
return A, B
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def MikotaPair(n):
|
| 56 |
+
"""Build a pair of full diagonal matrices for the generalized eigenvalue
|
| 57 |
+
problem. The Mikota pair acts as a nice test since the eigenvalues are the
|
| 58 |
+
squares of the integers n, n=1,2,...
|
| 59 |
+
"""
|
| 60 |
+
x = np.arange(1, n+1)
|
| 61 |
+
B = diag(1./x)
|
| 62 |
+
y = np.arange(n-1, 0, -1)
|
| 63 |
+
z = np.arange(2*n-1, 0, -2)
|
| 64 |
+
A = diag(z)-diag(y, -1)-diag(y, 1)
|
| 65 |
+
return A, B
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def compare_solutions(A, B, m):
|
| 69 |
+
"""Check eig vs. lobpcg consistency.
|
| 70 |
+
"""
|
| 71 |
+
n = A.shape[0]
|
| 72 |
+
rnd = np.random.RandomState(0)
|
| 73 |
+
V = rnd.random((n, m))
|
| 74 |
+
X = orth(V)
|
| 75 |
+
eigvals, _ = lobpcg(A, X, B=B, tol=1e-2, maxiter=50, largest=False)
|
| 76 |
+
eigvals.sort()
|
| 77 |
+
w, _ = eig(A, b=B)
|
| 78 |
+
w.sort()
|
| 79 |
+
assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def test_Small():
|
| 83 |
+
A, B = ElasticRod(10)
|
| 84 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
| 85 |
+
compare_solutions(A, B, 10)
|
| 86 |
+
A, B = MikotaPair(10)
|
| 87 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
| 88 |
+
compare_solutions(A, B, 10)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def test_ElasticRod():
|
| 92 |
+
A, B = ElasticRod(20)
|
| 93 |
+
msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
|
| 94 |
+
with pytest.warns(UserWarning, match=msg):
|
| 95 |
+
compare_solutions(A, B, 2)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def test_MikotaPair():
|
| 99 |
+
A, B = MikotaPair(20)
|
| 100 |
+
compare_solutions(A, B, 2)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
@pytest.mark.parametrize("n", [50])
|
| 104 |
+
@pytest.mark.parametrize("m", [1, 2, 10])
|
| 105 |
+
@pytest.mark.parametrize("Vdtype", sorted(REAL_DTYPES, key=str))
|
| 106 |
+
@pytest.mark.parametrize("Bdtype", sorted(REAL_DTYPES, key=str))
|
| 107 |
+
@pytest.mark.parametrize("BVdtype", sorted(REAL_DTYPES, key=str))
|
| 108 |
+
def test_b_orthonormalize(n, m, Vdtype, Bdtype, BVdtype):
|
| 109 |
+
"""Test B-orthonormalization by Cholesky with callable 'B'.
|
| 110 |
+
The function '_b_orthonormalize' is key in LOBPCG but may
|
| 111 |
+
lead to numerical instabilities. The input vectors are often
|
| 112 |
+
badly scaled, so the function needs scale-invariant Cholesky;
|
| 113 |
+
see https://netlib.org/lapack/lawnspdf/lawn14.pdf.
|
| 114 |
+
"""
|
| 115 |
+
rnd = np.random.RandomState(0)
|
| 116 |
+
X = rnd.standard_normal((n, m)).astype(Vdtype)
|
| 117 |
+
Xcopy = np.copy(X)
|
| 118 |
+
vals = np.arange(1, n+1, dtype=float)
|
| 119 |
+
B = diags([vals], [0], (n, n)).astype(Bdtype)
|
| 120 |
+
BX = B @ X
|
| 121 |
+
BX = BX.astype(BVdtype)
|
| 122 |
+
dtype = min(X.dtype, B.dtype, BX.dtype)
|
| 123 |
+
# np.longdouble tol cannot be achieved on most systems
|
| 124 |
+
atol = m * n * max(np.finfo(dtype).eps, np.finfo(np.float64).eps)
|
| 125 |
+
|
| 126 |
+
Xo, BXo, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
|
| 127 |
+
# Check in-place.
|
| 128 |
+
assert_equal(X, Xo)
|
| 129 |
+
assert_equal(id(X), id(Xo))
|
| 130 |
+
assert_equal(BX, BXo)
|
| 131 |
+
assert_equal(id(BX), id(BXo))
|
| 132 |
+
# Check BXo.
|
| 133 |
+
assert_allclose(B @ Xo, BXo, atol=atol, rtol=atol)
|
| 134 |
+
# Check B-orthonormality
|
| 135 |
+
assert_allclose(Xo.T.conj() @ B @ Xo, np.identity(m),
|
| 136 |
+
atol=atol, rtol=atol)
|
| 137 |
+
# Repeat without BX in outputs
|
| 138 |
+
X = np.copy(Xcopy)
|
| 139 |
+
Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X)
|
| 140 |
+
assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
|
| 141 |
+
assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
|
| 142 |
+
# Check in-place.
|
| 143 |
+
assert_equal(X, Xo1)
|
| 144 |
+
assert_equal(id(X), id(Xo1))
|
| 145 |
+
# Check BXo1.
|
| 146 |
+
assert_allclose(B @ Xo1, BXo1, atol=atol, rtol=atol)
|
| 147 |
+
|
| 148 |
+
# Introduce column-scaling in X.
|
| 149 |
+
scaling = 1.0 / np.geomspace(10, 1e10, num=m)
|
| 150 |
+
X = Xcopy * scaling
|
| 151 |
+
X = X.astype(Vdtype)
|
| 152 |
+
BX = B @ X
|
| 153 |
+
BX = BX.astype(BVdtype)
|
| 154 |
+
# Check scaling-invariance of Cholesky-based orthonormalization
|
| 155 |
+
Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
|
| 156 |
+
# The output should be the same, up the signs of the columns.
|
| 157 |
+
Xo1 = sign_align(Xo1, Xo)
|
| 158 |
+
assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
|
| 159 |
+
BXo1 = sign_align(BXo1, BXo)
|
| 160 |
+
assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
@pytest.mark.filterwarnings("ignore:Exited at iteration 0")
|
| 164 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
| 165 |
+
def test_nonhermitian_warning(capsys):
|
| 166 |
+
"""Check the warning of a Ritz matrix being not Hermitian
|
| 167 |
+
by feeding a non-Hermitian input matrix.
|
| 168 |
+
Also check stdout since verbosityLevel=1 and lack of stderr.
|
| 169 |
+
"""
|
| 170 |
+
n = 10
|
| 171 |
+
X = np.arange(n * 2).reshape(n, 2).astype(np.float32)
|
| 172 |
+
A = np.arange(n * n).reshape(n, n).astype(np.float32)
|
| 173 |
+
with pytest.warns(UserWarning, match="Matrix gramA"):
|
| 174 |
+
_, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
|
| 175 |
+
out, err = capsys.readouterr() # Capture output
|
| 176 |
+
assert out.startswith("Solving standard eigenvalue") # Test stdout
|
| 177 |
+
assert err == '' # Test empty stderr
|
| 178 |
+
# Make the matrix symmetric and the UserWarning disappears.
|
| 179 |
+
A += A.T
|
| 180 |
+
_, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
|
| 181 |
+
out, err = capsys.readouterr() # Capture output
|
| 182 |
+
assert out.startswith("Solving standard eigenvalue") # Test stdout
|
| 183 |
+
assert err == '' # Test empty stderr
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def test_regression():
|
| 187 |
+
"""Check the eigenvalue of the identity matrix is one.
|
| 188 |
+
"""
|
| 189 |
+
# https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
|
| 190 |
+
n = 10
|
| 191 |
+
X = np.ones((n, 1))
|
| 192 |
+
A = np.identity(n)
|
| 193 |
+
w, _ = lobpcg(A, X)
|
| 194 |
+
assert_allclose(w, [1])
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
@pytest.mark.filterwarnings("ignore:The problem size")
|
| 198 |
+
@pytest.mark.parametrize('n, m, m_excluded', [(30, 4, 3), (4, 2, 0)])
|
| 199 |
+
def test_diagonal(n, m, m_excluded):
|
| 200 |
+
"""Test ``m - m_excluded`` eigenvalues and eigenvectors of
|
| 201 |
+
diagonal matrices of the size ``n`` varying matrix formats:
|
| 202 |
+
dense array, spare matrix, and ``LinearOperator`` for both
|
| 203 |
+
matrixes in the generalized eigenvalue problem ``Av = cBv``
|
| 204 |
+
and for the preconditioner.
|
| 205 |
+
"""
|
| 206 |
+
rnd = np.random.RandomState(0)
|
| 207 |
+
|
| 208 |
+
# Define the generalized eigenvalue problem Av = cBv
|
| 209 |
+
# where (c, v) is a generalized eigenpair,
|
| 210 |
+
# A is the diagonal matrix whose entries are 1,...n,
|
| 211 |
+
# B is the identity matrix.
|
| 212 |
+
vals = np.arange(1, n+1, dtype=float)
|
| 213 |
+
A_s = diags([vals], [0], (n, n))
|
| 214 |
+
A_a = A_s.toarray()
|
| 215 |
+
|
| 216 |
+
def A_f(x):
|
| 217 |
+
return A_s @ x
|
| 218 |
+
|
| 219 |
+
A_lo = LinearOperator(matvec=A_f,
|
| 220 |
+
matmat=A_f,
|
| 221 |
+
shape=(n, n), dtype=float)
|
| 222 |
+
|
| 223 |
+
B_a = eye(n)
|
| 224 |
+
B_s = csr_matrix(B_a)
|
| 225 |
+
|
| 226 |
+
def B_f(x):
|
| 227 |
+
return B_a @ x
|
| 228 |
+
|
| 229 |
+
B_lo = LinearOperator(matvec=B_f,
|
| 230 |
+
matmat=B_f,
|
| 231 |
+
shape=(n, n), dtype=float)
|
| 232 |
+
|
| 233 |
+
# Let the preconditioner M be the inverse of A.
|
| 234 |
+
M_s = diags([1./vals], [0], (n, n))
|
| 235 |
+
M_a = M_s.toarray()
|
| 236 |
+
|
| 237 |
+
def M_f(x):
|
| 238 |
+
return M_s @ x
|
| 239 |
+
|
| 240 |
+
M_lo = LinearOperator(matvec=M_f,
|
| 241 |
+
matmat=M_f,
|
| 242 |
+
shape=(n, n), dtype=float)
|
| 243 |
+
|
| 244 |
+
# Pick random initial vectors.
|
| 245 |
+
X = rnd.normal(size=(n, m))
|
| 246 |
+
|
| 247 |
+
# Require that the returned eigenvectors be in the orthogonal complement
|
| 248 |
+
# of the first few standard basis vectors.
|
| 249 |
+
if m_excluded > 0:
|
| 250 |
+
Y = np.eye(n, m_excluded)
|
| 251 |
+
else:
|
| 252 |
+
Y = None
|
| 253 |
+
|
| 254 |
+
for A in [A_a, A_s, A_lo]:
|
| 255 |
+
for B in [B_a, B_s, B_lo]:
|
| 256 |
+
for M in [M_a, M_s, M_lo]:
|
| 257 |
+
eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y,
|
| 258 |
+
maxiter=40, largest=False)
|
| 259 |
+
|
| 260 |
+
assert_allclose(eigvals, np.arange(1+m_excluded,
|
| 261 |
+
1+m_excluded+m))
|
| 262 |
+
_check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
|
| 266 |
+
"""Check if the eigenvalue residual is small.
|
| 267 |
+
"""
|
| 268 |
+
mult_wV = np.multiply(w, V)
|
| 269 |
+
dot_MV = M.dot(V)
|
| 270 |
+
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def _check_fiedler(n, p):
|
| 274 |
+
"""Check the Fiedler vector computation.
|
| 275 |
+
"""
|
| 276 |
+
# This is not necessarily the recommended way to find the Fiedler vector.
|
| 277 |
+
col = np.zeros(n)
|
| 278 |
+
col[1] = 1
|
| 279 |
+
A = toeplitz(col)
|
| 280 |
+
D = np.diag(A.sum(axis=1))
|
| 281 |
+
L = D - A
|
| 282 |
+
# Compute the full eigendecomposition using tricks, e.g.
|
| 283 |
+
# http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
|
| 284 |
+
tmp = np.pi * np.arange(n) / n
|
| 285 |
+
analytic_w = 2 * (1 - np.cos(tmp))
|
| 286 |
+
analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
|
| 287 |
+
_check_eigen(L, analytic_w, analytic_V)
|
| 288 |
+
# Compute the full eigendecomposition using eigh.
|
| 289 |
+
eigh_w, eigh_V = eigh(L)
|
| 290 |
+
_check_eigen(L, eigh_w, eigh_V)
|
| 291 |
+
# Check that the first eigenvalue is near zero and that the rest agree.
|
| 292 |
+
assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
|
| 293 |
+
assert_allclose(eigh_w[1:], analytic_w[1:])
|
| 294 |
+
|
| 295 |
+
# Check small lobpcg eigenvalues.
|
| 296 |
+
X = analytic_V[:, :p]
|
| 297 |
+
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
|
| 298 |
+
assert_equal(lobpcg_w.shape, (p,))
|
| 299 |
+
assert_equal(lobpcg_V.shape, (n, p))
|
| 300 |
+
_check_eigen(L, lobpcg_w, lobpcg_V)
|
| 301 |
+
assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
|
| 302 |
+
assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
|
| 303 |
+
|
| 304 |
+
# Check large lobpcg eigenvalues.
|
| 305 |
+
X = analytic_V[:, -p:]
|
| 306 |
+
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
|
| 307 |
+
assert_equal(lobpcg_w.shape, (p,))
|
| 308 |
+
assert_equal(lobpcg_V.shape, (n, p))
|
| 309 |
+
_check_eigen(L, lobpcg_w, lobpcg_V)
|
| 310 |
+
assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
|
| 311 |
+
|
| 312 |
+
# Look for the Fiedler vector using good but not exactly correct guesses.
|
| 313 |
+
fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
|
| 314 |
+
X = np.vstack((np.ones(n), fiedler_guess)).T
|
| 315 |
+
lobpcg_w, _ = lobpcg(L, X, largest=False)
|
| 316 |
+
# Mathematically, the smaller eigenvalue should be zero
|
| 317 |
+
# and the larger should be the algebraic connectivity.
|
| 318 |
+
lobpcg_w = np.sort(lobpcg_w)
|
| 319 |
+
assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def test_fiedler_small_8():
|
| 323 |
+
"""Check the dense workaround path for small matrices.
|
| 324 |
+
"""
|
| 325 |
+
# This triggers the dense path because 8 < 2*5.
|
| 326 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
| 327 |
+
_check_fiedler(8, 2)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def test_fiedler_large_12():
|
| 331 |
+
"""Check the dense workaround path avoided for non-small matrices.
|
| 332 |
+
"""
|
| 333 |
+
# This does not trigger the dense path, because 2*5 <= 12.
|
| 334 |
+
_check_fiedler(12, 2)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
@pytest.mark.filterwarnings("ignore:Failed at iteration")
|
| 338 |
+
@pytest.mark.filterwarnings("ignore:Exited at iteration")
|
| 339 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
| 340 |
+
def test_failure_to_run_iterations():
|
| 341 |
+
"""Check that the code exits gracefully without breaking. Issue #10974.
|
| 342 |
+
The code may or not issue a warning, filtered out. Issue #15935, #17954.
|
| 343 |
+
"""
|
| 344 |
+
rnd = np.random.RandomState(0)
|
| 345 |
+
X = rnd.standard_normal((100, 10))
|
| 346 |
+
A = X @ X.T
|
| 347 |
+
Q = rnd.standard_normal((X.shape[0], 4))
|
| 348 |
+
eigenvalues, _ = lobpcg(A, Q, maxiter=40, tol=1e-12)
|
| 349 |
+
assert np.max(eigenvalues) > 0
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def test_failure_to_run_iterations_nonsymmetric():
|
| 353 |
+
"""Check that the code exists gracefully without breaking
|
| 354 |
+
if the matrix in not symmetric.
|
| 355 |
+
"""
|
| 356 |
+
A = np.zeros((10, 10))
|
| 357 |
+
A[0, 1] = 1
|
| 358 |
+
Q = np.ones((10, 1))
|
| 359 |
+
msg = "Exited at iteration 2|Exited postprocessing with accuracies.*"
|
| 360 |
+
with pytest.warns(UserWarning, match=msg):
|
| 361 |
+
eigenvalues, _ = lobpcg(A, Q, maxiter=20)
|
| 362 |
+
assert np.max(eigenvalues) > 0
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
@pytest.mark.filterwarnings("ignore:The problem size")
|
| 366 |
+
def test_hermitian():
|
| 367 |
+
"""Check complex-value Hermitian cases.
|
| 368 |
+
"""
|
| 369 |
+
rnd = np.random.RandomState(0)
|
| 370 |
+
|
| 371 |
+
sizes = [3, 12]
|
| 372 |
+
ks = [1, 2]
|
| 373 |
+
gens = [True, False]
|
| 374 |
+
|
| 375 |
+
for s, k, gen, dh, dx, db in (
|
| 376 |
+
itertools.product(sizes, ks, gens, gens, gens, gens)
|
| 377 |
+
):
|
| 378 |
+
H = rnd.random((s, s)) + 1.j * rnd.random((s, s))
|
| 379 |
+
H = 10 * np.eye(s) + H + H.T.conj()
|
| 380 |
+
H = H.astype(np.complex128) if dh else H.astype(np.complex64)
|
| 381 |
+
|
| 382 |
+
X = rnd.standard_normal((s, k))
|
| 383 |
+
X = X + 1.j * rnd.standard_normal((s, k))
|
| 384 |
+
X = X.astype(np.complex128) if dx else X.astype(np.complex64)
|
| 385 |
+
|
| 386 |
+
if not gen:
|
| 387 |
+
B = np.eye(s)
|
| 388 |
+
w, v = lobpcg(H, X, maxiter=99, verbosityLevel=0)
|
| 389 |
+
# Also test mixing complex H with real B.
|
| 390 |
+
wb, _ = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
|
| 391 |
+
assert_allclose(w, wb, rtol=1e-6)
|
| 392 |
+
w0, _ = eigh(H)
|
| 393 |
+
else:
|
| 394 |
+
B = rnd.random((s, s)) + 1.j * rnd.random((s, s))
|
| 395 |
+
B = 10 * np.eye(s) + B.dot(B.T.conj())
|
| 396 |
+
B = B.astype(np.complex128) if db else B.astype(np.complex64)
|
| 397 |
+
w, v = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
|
| 398 |
+
w0, _ = eigh(H, B)
|
| 399 |
+
|
| 400 |
+
for wx, vx in zip(w, v.T):
|
| 401 |
+
# Check eigenvector
|
| 402 |
+
assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
|
| 403 |
+
/ np.linalg.norm(H.dot(vx)),
|
| 404 |
+
0, atol=5e-2, rtol=0)
|
| 405 |
+
|
| 406 |
+
# Compare eigenvalues
|
| 407 |
+
j = np.argmin(abs(w0 - wx))
|
| 408 |
+
assert_allclose(wx, w0[j], rtol=1e-4)
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
# The n=5 case tests the alternative small matrix code path that uses eigh().
|
| 412 |
+
@pytest.mark.filterwarnings("ignore:The problem size")
|
| 413 |
+
@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
|
| 414 |
+
def test_eigs_consistency(n, atol):
|
| 415 |
+
"""Check eigs vs. lobpcg consistency.
|
| 416 |
+
"""
|
| 417 |
+
vals = np.arange(1, n+1, dtype=np.float64)
|
| 418 |
+
A = spdiags(vals, 0, n, n)
|
| 419 |
+
rnd = np.random.RandomState(0)
|
| 420 |
+
X = rnd.standard_normal((n, 2))
|
| 421 |
+
lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
|
| 422 |
+
vals, _ = eigs(A, k=2)
|
| 423 |
+
|
| 424 |
+
_check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
|
| 425 |
+
assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
def test_verbosity():
|
| 429 |
+
"""Check that nonzero verbosity level code runs.
|
| 430 |
+
"""
|
| 431 |
+
rnd = np.random.RandomState(0)
|
| 432 |
+
X = rnd.standard_normal((10, 10))
|
| 433 |
+
A = X @ X.T
|
| 434 |
+
Q = rnd.standard_normal((X.shape[0], 1))
|
| 435 |
+
msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
|
| 436 |
+
with pytest.warns(UserWarning, match=msg):
|
| 437 |
+
_, _ = lobpcg(A, Q, maxiter=3, verbosityLevel=9)
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
@pytest.mark.xfail(_IS_32BIT and sys.platform == 'win32',
|
| 441 |
+
reason="tolerance violation on windows")
|
| 442 |
+
@pytest.mark.xfail(platform.machine() == 'ppc64le',
|
| 443 |
+
reason="fails on ppc64le")
|
| 444 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
| 445 |
+
def test_tolerance_float32():
|
| 446 |
+
"""Check lobpcg for attainable tolerance in float32.
|
| 447 |
+
"""
|
| 448 |
+
rnd = np.random.RandomState(0)
|
| 449 |
+
n = 50
|
| 450 |
+
m = 3
|
| 451 |
+
vals = -np.arange(1, n + 1)
|
| 452 |
+
A = diags([vals], [0], (n, n))
|
| 453 |
+
A = A.astype(np.float32)
|
| 454 |
+
X = rnd.standard_normal((n, m))
|
| 455 |
+
X = X.astype(np.float32)
|
| 456 |
+
eigvals, _ = lobpcg(A, X, tol=1.25e-5, maxiter=50, verbosityLevel=0)
|
| 457 |
+
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=2e-5, rtol=1e-5)
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
@pytest.mark.parametrize("vdtype", VDTYPES)
@pytest.mark.parametrize("mdtype", MDTYPES)
@pytest.mark.parametrize("arr_type", [np.array,
                                      sparse.csr_matrix,
                                      sparse.coo_matrix])
def test_dtypes(vdtype, mdtype, arr_type):
    """Test lobpcg in various dtypes.
    """
    rng = np.random.RandomState(0)
    size, nev = 12, 2
    # Diagonal operator with known spectrum 1..size, in the matrix dtype.
    operator = arr_type(np.diag(np.arange(1, size + 1)).astype(mdtype))
    guess = rng.random((size, nev)).astype(vdtype)
    vals_out, vecs_out = lobpcg(operator, guess, tol=1e-2, largest=False)
    assert_allclose(vals_out, np.arange(1, 1 + nev), atol=1e-1)
    # eigenvectors must be nearly real in any case
    assert_allclose(np.sum(np.abs(vecs_out - vecs_out.conj())), 0, atol=1e-2)
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_inplace_warning():
    """Check lobpcg gives a warning in '_b_orthonormalize'
    that in-place orthogonalization is impossible due to dtype mismatch.
    """
    rng = np.random.RandomState(0)
    size, nev = 6, 1
    # Complex operator paired with a real initial guess triggers the
    # dtype-mismatch code path.
    operator = diags([-np.arange(1, size + 1)], [0],
                     (size, size)).astype(np.cdouble)
    guess = rng.standard_normal((size, nev))
    with pytest.warns(UserWarning, match="Inplace update"):
        eigvals, _ = lobpcg(operator, guess, maxiter=2, verbosityLevel=1)
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
def test_maxit():
    """Check lobpcg if maxit=maxiter runs maxiter iterations and
    if maxit=None runs 20 iterations (the default)
    by checking the size of the iteration history output, which should
    be the number of iterations plus 3 (initial, final, and postprocessing)
    typically when maxiter is small and the choice of the best is passive.
    """
    rnd = np.random.RandomState(0)
    n = 50
    m = 4
    vals = -np.arange(1, n + 1)
    A = diags([vals], [0], (n, n))
    A = A.astype(np.float32)
    X = rnd.standard_normal((n, m))
    X = X.astype(np.float64)
    # Tiny maxiter with a tight tol forces an early-exit warning.
    msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
    for maxiter in range(1, 4):
        with pytest.warns(UserWarning, match=msg):
            _, _, l_h, r_h = lobpcg(A, X, tol=1e-8, maxiter=maxiter,
                                    retLambdaHistory=True,
                                    retResidualNormsHistory=True)
        # History length = iterations + 3 (initial, final, postprocessing).
        assert_allclose(np.shape(l_h)[0], maxiter+3)
        assert_allclose(np.shape(r_h)[0], maxiter+3)
    with pytest.warns(UserWarning, match=msg):
        l, _, l_h, r_h = lobpcg(A, X, tol=1e-8,
                                retLambdaHistory=True,
                                retResidualNormsHistory=True)
    # Default maxiter is 20 iterations.
    assert_allclose(np.shape(l_h)[0], 20+3)
    assert_allclose(np.shape(r_h)[0], 20+3)
    # Check that eigenvalue output is the last one in history
    assert_allclose(l, l_h[-1])
    # Make sure that both history outputs are lists
    assert isinstance(l_h, list)
    assert isinstance(r_h, list)
    # Make sure that both history lists are arrays-like
    assert_allclose(np.shape(l_h), np.shape(np.asarray(l_h)))
    assert_allclose(np.shape(r_h), np.shape(np.asarray(r_h)))
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
@pytest.mark.slow
@pytest.mark.parametrize("n", [15])
@pytest.mark.parametrize("m", [1, 2])
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_diagonal_data_types(n, m):
    """Check lobpcg for diagonal matrices for all matrix types.
    Constraints are imposed, so a dense eigensolver eig cannot run.
    """
    rnd = np.random.RandomState(0)
    # Define the generalized eigenvalue problem Av = cBv
    # where (c, v) is a generalized eigenpair,
    # and where we choose A and B to be diagonal.
    vals = np.arange(1, n + 1)

    # list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
    list_sparse_format = ['coo']
    sparse_formats = len(list_sparse_format)
    for s_f_i, s_f in enumerate(list_sparse_format):

        # A in several representations: sparse, dense, in two precisions,
        # as a callable and as a LinearOperator.
        As64 = diags([vals * vals], [0], (n, n), format=s_f)
        As32 = As64.astype(np.float32)
        Af64 = As64.toarray()
        Af32 = Af64.astype(np.float32)

        def As32f(x):
            return As32 @ x
        As32LO = LinearOperator(matvec=As32f,
                                matmat=As32f,
                                shape=(n, n),
                                dtype=As32.dtype)

        listA = [Af64, As64, Af32, As32, As32f, As32LO, lambda v: As32 @ v]

        # B (the mass matrix) in the same variety of representations.
        Bs64 = diags([vals], [0], (n, n), format=s_f)
        Bf64 = Bs64.toarray()
        Bs32 = Bs64.astype(np.float32)

        def Bs32f(x):
            return Bs32 @ x
        Bs32LO = LinearOperator(matvec=Bs32f,
                                matmat=Bs32f,
                                shape=(n, n),
                                dtype=Bs32.dtype)
        listB = [Bf64, Bs64, Bs32, Bs32f, Bs32LO, lambda v: Bs32 @ v]

        # Define the preconditioner function as LinearOperator.
        Ms64 = diags([1./vals], [0], (n, n), format=s_f)

        def Ms64precond(x):
            return Ms64 @ x
        Ms64precondLO = LinearOperator(matvec=Ms64precond,
                                       matmat=Ms64precond,
                                       shape=(n, n),
                                       dtype=Ms64.dtype)
        Mf64 = Ms64.toarray()

        def Mf64precond(x):
            return Mf64 @ x
        Mf64precondLO = LinearOperator(matvec=Mf64precond,
                                       matmat=Mf64precond,
                                       shape=(n, n),
                                       dtype=Mf64.dtype)
        Ms32 = Ms64.astype(np.float32)

        def Ms32precond(x):
            return Ms32 @ x
        Ms32precondLO = LinearOperator(matvec=Ms32precond,
                                       matmat=Ms32precond,
                                       shape=(n, n),
                                       dtype=Ms32.dtype)
        Mf32 = Ms32.toarray()

        def Mf32precond(x):
            return Mf32 @ x
        Mf32precondLO = LinearOperator(matvec=Mf32precond,
                                       matmat=Mf32precond,
                                       shape=(n, n),
                                       dtype=Mf32.dtype)
        listM = [None, Ms64, Ms64precondLO, Mf64precondLO, Ms64precond,
                 Ms32, Ms32precondLO, Mf32precondLO, Ms32precond]

        # Setup matrix of the initial approximation to the eigenvectors
        # (cannot be sparse array).
        Xf64 = rnd.random((n, m))
        Xf32 = Xf64.astype(np.float32)
        listX = [Xf64, Xf32]

        # Require that the returned eigenvectors be in the orthogonal complement
        # of the first few standard basis vectors (cannot be sparse array).
        m_excluded = 3
        Yf64 = np.eye(n, m_excluded, dtype=float)
        Yf32 = np.eye(n, m_excluded, dtype=np.float32)
        listY = [Yf64, Yf32]

        tests = list(itertools.product(listA, listB, listM, listX, listY))
        # This is one of the slower tests because there are >1,000 configs
        # to test here, instead of checking product of all input, output types
        # test each configuration for the first sparse format, and then
        # for one additional sparse format. this takes 2/7=30% as long as
        # testing all configurations for all sparse formats.
        if s_f_i > 0:
            tests = tests[s_f_i - 1::sparse_formats-1]

        for A, B, M, X, Y in tests:
            eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
                                maxiter=100, largest=False)
            # With the first m_excluded basis vectors constrained out, the
            # smallest generalized eigenvalues are m_excluded+1, ...
            assert_allclose(eigvals,
                            np.arange(1 + m_excluded, 1 + m_excluded + m),
                            atol=1e-5)
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/tests/__init__.py
ADDED
|
File without changes
|
.venv/Lib/site-packages/scipy/sparse/linalg/_eigen/tests/test_svds.py
ADDED
|
@@ -0,0 +1,862 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import copy
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from numpy.testing import assert_allclose, assert_equal, assert_array_equal
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
from scipy.linalg import svd, null_space
|
| 9 |
+
from scipy.sparse import csc_matrix, issparse, spdiags, random
|
| 10 |
+
from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
| 11 |
+
from scipy.sparse.linalg import svds
|
| 12 |
+
from scipy.sparse.linalg._eigen.arpack import ArpackNoConvergence
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# --- Helper Functions / Classes ---
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def sorted_svd(m, k, which='LM'):
    """Dense reference SVD restricted to ``k`` singular triplets.

    Returns ``(u, s, vh)`` with columns of ``u``, entries of ``s`` and rows
    of ``vh`` picked for the ``k`` largest (``which='LM'``) or smallest
    (``which='SM'``) singular values, ordered ascending by singular value.
    """
    dense = m.toarray() if issparse(m) else m
    u, s, vh = svd(dense)
    order = np.argsort(s)
    if which == 'LM':
        keep = order[-k:]
    elif which == 'SM':
        keep = order[:k]
    else:
        raise ValueError(f"unknown which={which!r}")
    return u[:, keep], s[keep], vh[keep]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _check_svds(A, k, u, s, vh, which="LM", check_usvh_A=False,
|
| 35 |
+
check_svd=True, atol=1e-10, rtol=1e-7):
|
| 36 |
+
n, m = A.shape
|
| 37 |
+
|
| 38 |
+
# Check shapes.
|
| 39 |
+
assert_equal(u.shape, (n, k))
|
| 40 |
+
assert_equal(s.shape, (k,))
|
| 41 |
+
assert_equal(vh.shape, (k, m))
|
| 42 |
+
|
| 43 |
+
# Check that the original matrix can be reconstituted.
|
| 44 |
+
A_rebuilt = (u*s).dot(vh)
|
| 45 |
+
assert_equal(A_rebuilt.shape, A.shape)
|
| 46 |
+
if check_usvh_A:
|
| 47 |
+
assert_allclose(A_rebuilt, A, atol=atol, rtol=rtol)
|
| 48 |
+
|
| 49 |
+
# Check that u is a semi-orthogonal matrix.
|
| 50 |
+
uh_u = np.dot(u.T.conj(), u)
|
| 51 |
+
assert_equal(uh_u.shape, (k, k))
|
| 52 |
+
assert_allclose(uh_u, np.identity(k), atol=atol, rtol=rtol)
|
| 53 |
+
|
| 54 |
+
# Check that vh is a semi-orthogonal matrix.
|
| 55 |
+
vh_v = np.dot(vh, vh.T.conj())
|
| 56 |
+
assert_equal(vh_v.shape, (k, k))
|
| 57 |
+
assert_allclose(vh_v, np.identity(k), atol=atol, rtol=rtol)
|
| 58 |
+
|
| 59 |
+
# Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
|
| 60 |
+
if check_svd:
|
| 61 |
+
u2, s2, vh2 = sorted_svd(A, k, which)
|
| 62 |
+
assert_allclose(np.abs(u), np.abs(u2), atol=atol, rtol=rtol)
|
| 63 |
+
assert_allclose(s, s2, atol=atol, rtol=rtol)
|
| 64 |
+
assert_allclose(np.abs(vh), np.abs(vh2), atol=atol, rtol=rtol)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _check_svds_n(A, k, u, s, vh, which="LM", check_res=True,
|
| 68 |
+
check_svd=True, atol=1e-10, rtol=1e-7):
|
| 69 |
+
n, m = A.shape
|
| 70 |
+
|
| 71 |
+
# Check shapes.
|
| 72 |
+
assert_equal(u.shape, (n, k))
|
| 73 |
+
assert_equal(s.shape, (k,))
|
| 74 |
+
assert_equal(vh.shape, (k, m))
|
| 75 |
+
|
| 76 |
+
# Check that u is a semi-orthogonal matrix.
|
| 77 |
+
uh_u = np.dot(u.T.conj(), u)
|
| 78 |
+
assert_equal(uh_u.shape, (k, k))
|
| 79 |
+
error = np.sum(np.abs(uh_u - np.identity(k))) / (k * k)
|
| 80 |
+
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
|
| 81 |
+
|
| 82 |
+
# Check that vh is a semi-orthogonal matrix.
|
| 83 |
+
vh_v = np.dot(vh, vh.T.conj())
|
| 84 |
+
assert_equal(vh_v.shape, (k, k))
|
| 85 |
+
error = np.sum(np.abs(vh_v - np.identity(k))) / (k * k)
|
| 86 |
+
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
|
| 87 |
+
|
| 88 |
+
# Check residuals
|
| 89 |
+
if check_res:
|
| 90 |
+
ru = A.T.conj() @ u - vh.T.conj() * s
|
| 91 |
+
rus = np.sum(np.abs(ru)) / (n * k)
|
| 92 |
+
rvh = A @ vh.T.conj() - u * s
|
| 93 |
+
rvhs = np.sum(np.abs(rvh)) / (m * k)
|
| 94 |
+
assert_allclose(rus, 0.0, atol=atol, rtol=rtol)
|
| 95 |
+
assert_allclose(rvhs, 0.0, atol=atol, rtol=rtol)
|
| 96 |
+
|
| 97 |
+
# Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
|
| 98 |
+
if check_svd:
|
| 99 |
+
u2, s2, vh2 = sorted_svd(A, k, which)
|
| 100 |
+
assert_allclose(s, s2, atol=atol, rtol=rtol)
|
| 101 |
+
A_rebuilt_svd = (u2*s2).dot(vh2)
|
| 102 |
+
A_rebuilt = (u*s).dot(vh)
|
| 103 |
+
assert_equal(A_rebuilt.shape, A.shape)
|
| 104 |
+
error = np.sum(np.abs(A_rebuilt_svd - A_rebuilt)) / (k * k)
|
| 105 |
+
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
class CheckingLinearOperator(LinearOperator):
    """Wrap a matrix ``A`` as a LinearOperator that asserts every vector
    handed to (r)matvec is effectively one-dimensional."""

    def __init__(self, A):
        self.A = A
        self.dtype = A.dtype
        self.shape = A.shape

    @staticmethod
    def _assert_vector(x):
        # All entries must lie along a single axis (flat or column vector).
        assert_equal(max(x.shape), np.size(x))

    def _matvec(self, x):
        self._assert_vector(x)
        return self.A.dot(x)

    def _rmatvec(self, x):
        self._assert_vector(x)
        return self.A.T.conjugate().dot(x)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
# --- Test Input Validation ---
|
| 124 |
+
# Tests input validation on parameters `k` and `which`.
|
| 125 |
+
# Needs better input validation checks for all other parameters.
|
| 126 |
+
|
| 127 |
+
class SVDSCommonTests:
|
| 128 |
+
|
| 129 |
+
solver = None
|
| 130 |
+
|
| 131 |
+
# some of these IV tests could run only once, say with solver=None
|
| 132 |
+
|
| 133 |
+
_A_empty_msg = "`A` must not be empty."
|
| 134 |
+
_A_dtype_msg = "`A` must be of floating or complex floating data type"
|
| 135 |
+
_A_type_msg = "type not understood"
|
| 136 |
+
_A_ndim_msg = "array must have ndim <= 2"
|
| 137 |
+
_A_validation_inputs = [
|
| 138 |
+
(np.asarray([[]]), ValueError, _A_empty_msg),
|
| 139 |
+
(np.asarray([[1, 2], [3, 4]]), ValueError, _A_dtype_msg),
|
| 140 |
+
("hi", TypeError, _A_type_msg),
|
| 141 |
+
(np.asarray([[[1., 2.], [3., 4.]]]), ValueError, _A_ndim_msg)]
|
| 142 |
+
|
| 143 |
+
    @pytest.mark.parametrize("args", _A_validation_inputs)
    def test_svds_input_validation_A(self, args):
        # Each parametrized case is (bad `A`, expected error type, message).
        A, error_type, message = args
        with pytest.raises(error_type, match=message):
            svds(A, k=1, solver=self.solver)
|
| 148 |
+
|
| 149 |
+
    @pytest.mark.parametrize("k", [-1, 0, 3, 4, 5, 1.5, "1"])
    def test_svds_input_validation_k_1(self, k):
        # Invalid `k` values must raise; k == min(A.shape) is allowed
        # only for the propack solver.
        rng = np.random.default_rng(0)
        A = rng.random((4, 3))

        # propack can do complete SVD
        if self.solver == 'propack' and k == 3:
            res = svds(A, k=k, solver=self.solver, random_state=0)
            _check_svds(A, k, *res, check_usvh_A=True, check_svd=True)
            return

        message = ("`k` must be an integer satisfying")
        with pytest.raises(ValueError, match=message):
            svds(A, k=k, solver=self.solver)
|
| 163 |
+
|
| 164 |
+
    def test_svds_input_validation_k_2(self):
        # I think the stack trace is reasonable when `k` can't be converted
        # to an int.
        message = "int() argument must be a"
        with pytest.raises(TypeError, match=re.escape(message)):
            svds(np.eye(10), k=[], solver=self.solver)

        message = "invalid literal for int()"
        with pytest.raises(ValueError, match=message):
            svds(np.eye(10), k="hi", solver=self.solver)
|
| 174 |
+
|
| 175 |
+
    @pytest.mark.parametrize("tol", (-1, np.inf, np.nan))
    def test_svds_input_validation_tol_1(self, tol):
        # Negative and non-finite tolerances must be rejected.
        message = "`tol` must be a non-negative floating point value."
        with pytest.raises(ValueError, match=message):
            svds(np.eye(10), tol=tol, solver=self.solver)
|
| 180 |
+
|
| 181 |
+
    @pytest.mark.parametrize("tol", ([], 'hi'))
    def test_svds_input_validation_tol_2(self, tol):
        # I think the stack trace is reasonable here
        message = "'<' not supported between instances"
        with pytest.raises(TypeError, match=message):
            svds(np.eye(10), tol=tol, solver=self.solver)
|
| 187 |
+
|
| 188 |
+
    @pytest.mark.parametrize("which", ('LA', 'SA', 'ekki', 0))
    def test_svds_input_validation_which(self, which):
        # Regression test for a github issue.
        # https://github.com/scipy/scipy/issues/4590
        # Function was not checking for eigenvalue type and unintended
        # values could be returned.
        with pytest.raises(ValueError, match="`which` must be in"):
            svds(np.eye(10), which=which, solver=self.solver)
|
| 196 |
+
|
| 197 |
+
    @pytest.mark.parametrize("transpose", (True, False))
    @pytest.mark.parametrize("n", range(4, 9))
    def test_svds_input_validation_v0_1(self, transpose, n):
        # `v0` of the wrong length must raise; the required length depends
        # on the solver (propack wants A.shape[0], others min(A.shape)).
        rng = np.random.default_rng(0)
        A = rng.random((5, 7))
        v0 = rng.random(n)
        if transpose:
            A = A.T
        k = 2
        message = "`v0` must have shape"

        required_length = (A.shape[0] if self.solver == 'propack'
                           else min(A.shape))
        if n != required_length:
            with pytest.raises(ValueError, match=message):
                svds(A, k=k, v0=v0, solver=self.solver)
|
| 213 |
+
|
| 214 |
+
    def test_svds_input_validation_v0_2(self):
        # A 2-D `v0` must be rejected even if it has the right size.
        A = np.ones((10, 10))
        v0 = np.ones((1, 10))
        message = "`v0` must have shape"
        with pytest.raises(ValueError, match=message):
            svds(A, k=1, v0=v0, solver=self.solver)
|
| 220 |
+
|
| 221 |
+
    @pytest.mark.parametrize("v0", ("hi", 1, np.ones(10, dtype=int)))
    def test_svds_input_validation_v0_3(self, v0):
        # `v0` must be a floating/complex array, not strings or ints.
        A = np.ones((10, 10))
        message = "`v0` must be of floating or complex floating data type."
        with pytest.raises(ValueError, match=message):
            svds(A, k=1, v0=v0, solver=self.solver)
|
| 227 |
+
|
| 228 |
+
    @pytest.mark.parametrize("maxiter", (-1, 0, 5.5))
    def test_svds_input_validation_maxiter_1(self, maxiter):
        # Non-positive and non-integer `maxiter` must be rejected.
        message = ("`maxiter` must be a positive integer.")
        with pytest.raises(ValueError, match=message):
            svds(np.eye(10), maxiter=maxiter, solver=self.solver)
|
| 233 |
+
|
| 234 |
+
    def test_svds_input_validation_maxiter_2(self):
        # I think the stack trace is reasonable when `k` can't be converted
        # to an int.
        message = "int() argument must be a"
        with pytest.raises(TypeError, match=re.escape(message)):
            svds(np.eye(10), maxiter=[], solver=self.solver)

        message = "invalid literal for int()"
        with pytest.raises(ValueError, match=message):
            svds(np.eye(10), maxiter="hi", solver=self.solver)
|
| 244 |
+
|
| 245 |
+
    @pytest.mark.parametrize("rsv", ('ekki', 10))
    def test_svds_input_validation_return_singular_vectors(self, rsv):
        # Only True/False/'u'/'vh' are valid values for this parameter.
        message = "`return_singular_vectors` must be in"
        with pytest.raises(ValueError, match=message):
            svds(np.eye(10), return_singular_vectors=rsv, solver=self.solver)
|
| 250 |
+
|
| 251 |
+
# --- Test Parameters ---
|
| 252 |
+
|
| 253 |
+
    @pytest.mark.parametrize("k", [3, 5])
    @pytest.mark.parametrize("which", ["LM", "SM"])
    def test_svds_parameter_k_which(self, k, which):
        # check that the `k` parameter sets the number of eigenvalues/
        # eigenvectors returned.
        # Also check that the `which` parameter sets whether the largest or
        # smallest eigenvalues are returned
        rng = np.random.default_rng(0)
        A = rng.random((10, 10))
        if self.solver == 'lobpcg':
            # lobpcg warns when the requested subspace is large relative
            # to the problem size.
            with pytest.warns(UserWarning, match="The problem size"):
                res = svds(A, k=k, which=which, solver=self.solver,
                           random_state=0)
        else:
            res = svds(A, k=k, which=which, solver=self.solver,
                       random_state=0)
        _check_svds(A, k, *res, which=which, atol=1e-9, rtol=2e-13)
|
| 270 |
+
|
| 271 |
+
    @pytest.mark.filterwarnings("ignore:Exited",
                                reason="Ignore LOBPCG early exit.")
    # loop instead of parametrize for simplicity
    def test_svds_parameter_tol(self):
        # check the effect of the `tol` parameter on solver accuracy by solving
        # the same problem with varying `tol` and comparing the eigenvalues
        # against ground truth computed
        n = 100  # matrix size
        k = 3    # number of eigenvalues to check

        # generate a random, sparse-ish matrix
        # effect isn't apparent for matrices that are too small
        rng = np.random.default_rng(0)
        A = rng.random((n, n))
        A[A > .1] = 0
        A = A @ A.T

        _, s, _ = svd(A)  # calculate ground truth

        # calculate the error as a function of `tol`
        A = csc_matrix(A)

        def err(tol):
            # Relative error of the k largest singular values vs. ground
            # truth (s is sorted descending, hence the reversed slice).
            _, s2, _ = svds(A, k=k, v0=np.ones(n), maxiter=1000,
                            solver=self.solver, tol=tol, random_state=0)
            return np.linalg.norm((s2 - s[k-1::-1])/s[k-1::-1])

        tols = [1e-4, 1e-2, 1e0]  # tolerance levels to check
        # for 'arpack' and 'propack', accuracies make discrete steps
        accuracies = {'propack': [1e-12, 1e-6, 1e-4],
                      'arpack': [2.5e-15, 1e-10, 1e-10],
                      'lobpcg': [2e-12, 4e-2, 2]}

        for tol, accuracy in zip(tols, accuracies[self.solver]):
            error = err(tol)
            assert error < accuracy
|
| 307 |
+
|
| 308 |
+
    def test_svd_v0(self):
        # check that the `v0` parameter affects the solution
        n = 100
        k = 1
        # If k != 1, LOBPCG needs more initial vectors, which are generated
        # with random_state, so it does not pass w/ k >= 2.
        # For some other values of `n`, the AssertionErrors are not raised
        # with different v0s, which is reasonable.

        rng = np.random.default_rng(0)
        A = rng.random((n, n))

        # with the same v0, solutions are the same, and they are accurate
        # v0 takes precedence over random_state
        v0a = rng.random(n)
        res1a = svds(A, k, v0=v0a, solver=self.solver, random_state=0)
        res2a = svds(A, k, v0=v0a, solver=self.solver, random_state=1)
        for idx in range(3):
            assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1a)

        # with the same v0, solutions are the same, and they are accurate
        v0b = rng.random(n)
        res1b = svds(A, k, v0=v0b, solver=self.solver, random_state=2)
        res2b = svds(A, k, v0=v0b, solver=self.solver, random_state=3)
        for idx in range(3):
            assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1b)

        # with different v0, solutions can be numerically different
        message = "Arrays are not equal"
        with pytest.raises(AssertionError, match=message):
            assert_equal(res1a, res1b)
|
| 341 |
+
|
| 342 |
+
    def test_svd_random_state(self):
        # check that the `random_state` parameter affects the solution
        # Admittedly, `n` and `k` are chosen so that all solver pass all
        # these checks. That's a tall order, since LOBPCG doesn't want to
        # achieve the desired accuracy and ARPACK often returns the same
        # singular values/vectors for different v0.
        n = 100
        k = 1

        rng = np.random.default_rng(0)
        A = rng.random((n, n))

        # with the same random_state, solutions are the same and accurate
        res1a = svds(A, k, solver=self.solver, random_state=0)
        res2a = svds(A, k, solver=self.solver, random_state=0)
        for idx in range(3):
            assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1a)

        # with the same random_state, solutions are the same and accurate
        res1b = svds(A, k, solver=self.solver, random_state=1)
        res2b = svds(A, k, solver=self.solver, random_state=1)
        for idx in range(3):
            assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1b)

        # with different random_state, solutions can be numerically different
        message = "Arrays are not equal"
        with pytest.raises(AssertionError, match=message):
            assert_equal(res1a, res1b)
|
| 372 |
+
|
| 373 |
+
    @pytest.mark.parametrize("random_state", (0, 1,
                                              np.random.RandomState(0),
                                              np.random.default_rng(0)))
    def test_svd_random_state_2(self, random_state):
        # Equal-state copies of any accepted `random_state` flavor must
        # produce identical, accurate results.
        n = 100
        k = 1

        rng = np.random.default_rng(0)
        A = rng.random((n, n))

        # Deep copy so both calls start from the same generator state.
        random_state_2 = copy.deepcopy(random_state)

        # with the same random_state, solutions are the same and accurate
        res1a = svds(A, k, solver=self.solver, random_state=random_state)
        res2a = svds(A, k, solver=self.solver, random_state=random_state_2)
        for idx in range(3):
            assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1a)
|
| 391 |
+
|
| 392 |
+
    @pytest.mark.parametrize("random_state", (None,
                                              np.random.RandomState(0),
                                              np.random.default_rng(0)))
    @pytest.mark.filterwarnings("ignore:Exited",
                                reason="Ignore LOBPCG early exit.")
    def test_svd_random_state_3(self, random_state):
        n = 100
        k = 5

        rng = np.random.default_rng(0)
        A = rng.random((n, n))

        random_state = copy.deepcopy(random_state)

        # random_state in different state produces accurate - but not
        # not necessarily identical - results
        res1a = svds(A, k, solver=self.solver, random_state=random_state, maxiter=1000)
        res2a = svds(A, k, solver=self.solver, random_state=random_state, maxiter=1000)
        _check_svds(A, k, *res1a, atol=2e-7)
        _check_svds(A, k, *res2a, atol=2e-7)

        # The generator advanced between calls, so the two (accurate)
        # results should not be bitwise identical.
        message = "Arrays are not equal"
        with pytest.raises(AssertionError, match=message):
            assert_equal(res1a, res2a)
|
| 416 |
+
|
| 417 |
+
    @pytest.mark.filterwarnings("ignore:Exited postprocessing")
    def test_svd_maxiter(self):
        # check that maxiter works as expected: should not return accurate
        # solution after 1 iteration, but should with default `maxiter`
        A = np.diag(np.arange(9)).astype(np.float64)
        k = 1
        u, s, vh = sorted_svd(A, k)
        # Use default maxiter by default
        maxiter = None

        # Each solver signals non-convergence at maxiter=1 differently.
        if self.solver == 'arpack':
            message = "ARPACK error -1: No convergence"
            with pytest.raises(ArpackNoConvergence, match=message):
                svds(A, k, ncv=3, maxiter=1, solver=self.solver)
        elif self.solver == 'lobpcg':
            # Set maxiter higher so test passes without changing
            # default and breaking backward compatibility (gh-20221)
            maxiter = 30
            with pytest.warns(UserWarning, match="Exited at iteration"):
                svds(A, k, maxiter=1, solver=self.solver)
        elif self.solver == 'propack':
            message = "k=1 singular triplets did not converge within"
            with pytest.raises(np.linalg.LinAlgError, match=message):
                svds(A, k, maxiter=1, solver=self.solver)

        ud, sd, vhd = svds(A, k, solver=self.solver, maxiter=maxiter,
                           random_state=0)
        _check_svds(A, k, ud, sd, vhd, atol=1e-8)
        assert_allclose(np.abs(ud), np.abs(u), atol=1e-8)
        assert_allclose(np.abs(vhd), np.abs(vh), atol=1e-8)
        assert_allclose(np.abs(sd), np.abs(s), atol=1e-9)
|
| 448 |
+
|
| 449 |
+
@pytest.mark.parametrize("rsv", (True, False, 'u', 'vh'))
|
| 450 |
+
@pytest.mark.parametrize("shape", ((5, 7), (6, 6), (7, 5)))
|
| 451 |
+
def test_svd_return_singular_vectors(self, rsv, shape):
|
| 452 |
+
# check that the return_singular_vectors parameter works as expected
|
| 453 |
+
rng = np.random.default_rng(0)
|
| 454 |
+
A = rng.random(shape)
|
| 455 |
+
k = 2
|
| 456 |
+
M, N = shape
|
| 457 |
+
u, s, vh = sorted_svd(A, k)
|
| 458 |
+
|
| 459 |
+
respect_u = True if self.solver == 'propack' else M <= N
|
| 460 |
+
respect_vh = True if self.solver == 'propack' else M > N
|
| 461 |
+
|
| 462 |
+
if self.solver == 'lobpcg':
|
| 463 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
| 464 |
+
if rsv is False:
|
| 465 |
+
s2 = svds(A, k, return_singular_vectors=rsv,
|
| 466 |
+
solver=self.solver, random_state=rng)
|
| 467 |
+
assert_allclose(s2, s)
|
| 468 |
+
elif rsv == 'u' and respect_u:
|
| 469 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
| 470 |
+
solver=self.solver, random_state=rng)
|
| 471 |
+
assert_allclose(np.abs(u2), np.abs(u))
|
| 472 |
+
assert_allclose(s2, s)
|
| 473 |
+
assert vh2 is None
|
| 474 |
+
elif rsv == 'vh' and respect_vh:
|
| 475 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
| 476 |
+
solver=self.solver, random_state=rng)
|
| 477 |
+
assert u2 is None
|
| 478 |
+
assert_allclose(s2, s)
|
| 479 |
+
assert_allclose(np.abs(vh2), np.abs(vh))
|
| 480 |
+
else:
|
| 481 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
| 482 |
+
solver=self.solver, random_state=rng)
|
| 483 |
+
if u2 is not None:
|
| 484 |
+
assert_allclose(np.abs(u2), np.abs(u))
|
| 485 |
+
assert_allclose(s2, s)
|
| 486 |
+
if vh2 is not None:
|
| 487 |
+
assert_allclose(np.abs(vh2), np.abs(vh))
|
| 488 |
+
else:
|
| 489 |
+
if rsv is False:
|
| 490 |
+
s2 = svds(A, k, return_singular_vectors=rsv,
|
| 491 |
+
solver=self.solver, random_state=rng)
|
| 492 |
+
assert_allclose(s2, s)
|
| 493 |
+
elif rsv == 'u' and respect_u:
|
| 494 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
| 495 |
+
solver=self.solver, random_state=rng)
|
| 496 |
+
assert_allclose(np.abs(u2), np.abs(u))
|
| 497 |
+
assert_allclose(s2, s)
|
| 498 |
+
assert vh2 is None
|
| 499 |
+
elif rsv == 'vh' and respect_vh:
|
| 500 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
| 501 |
+
solver=self.solver, random_state=rng)
|
| 502 |
+
assert u2 is None
|
| 503 |
+
assert_allclose(s2, s)
|
| 504 |
+
assert_allclose(np.abs(vh2), np.abs(vh))
|
| 505 |
+
else:
|
| 506 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
| 507 |
+
solver=self.solver, random_state=rng)
|
| 508 |
+
if u2 is not None:
|
| 509 |
+
assert_allclose(np.abs(u2), np.abs(u))
|
| 510 |
+
assert_allclose(s2, s)
|
| 511 |
+
if vh2 is not None:
|
| 512 |
+
assert_allclose(np.abs(vh2), np.abs(vh))
|
| 513 |
+
|
| 514 |
+
    # --- Test Basic Functionality ---
    # Tests the accuracy of each solver for real and complex matrices provided
    # as list, dense array, sparse matrix, and LinearOperator.

    # Small fixed fixtures: A1 is 4x3 (tall), A2 is 4x4 (square); both carry
    # complex entries so the `real` parametrization below can strip them.
    A1 = [[1, 2, 3], [3, 4, 3], [1 + 1j, 0, 2], [0, 0, 1]]
    A2 = [[1, 2, 3, 8 + 5j], [3 - 2j, 4, 3, 5], [1, 0, 2, 3], [0, 0, 1, 0]]
|
| 520 |
+
|
| 521 |
+
@pytest.mark.filterwarnings("ignore:k >= N - 1",
|
| 522 |
+
reason="needed to demonstrate #16725")
|
| 523 |
+
@pytest.mark.parametrize('A', (A1, A2))
|
| 524 |
+
@pytest.mark.parametrize('k', range(1, 5))
|
| 525 |
+
# PROPACK fails a lot if @pytest.mark.parametrize('which', ("SM", "LM"))
|
| 526 |
+
@pytest.mark.parametrize('real', (True, False))
|
| 527 |
+
@pytest.mark.parametrize('transpose', (False, True))
|
| 528 |
+
# In gh-14299, it was suggested the `svds` should _not_ work with lists
|
| 529 |
+
@pytest.mark.parametrize('lo_type', (np.asarray, csc_matrix,
|
| 530 |
+
aslinearoperator))
|
| 531 |
+
def test_svd_simple(self, A, k, real, transpose, lo_type):
|
| 532 |
+
|
| 533 |
+
A = np.asarray(A)
|
| 534 |
+
A = np.real(A) if real else A
|
| 535 |
+
A = A.T if transpose else A
|
| 536 |
+
A2 = lo_type(A)
|
| 537 |
+
|
| 538 |
+
# could check for the appropriate errors, but that is tested above
|
| 539 |
+
if k > min(A.shape):
|
| 540 |
+
pytest.skip("`k` cannot be greater than `min(A.shape)`")
|
| 541 |
+
if self.solver != 'propack' and k >= min(A.shape):
|
| 542 |
+
pytest.skip("Only PROPACK supports complete SVD")
|
| 543 |
+
if self.solver == 'arpack' and not real and k == min(A.shape) - 1:
|
| 544 |
+
pytest.skip("#16725")
|
| 545 |
+
|
| 546 |
+
atol = 3e-10
|
| 547 |
+
if self.solver == 'propack':
|
| 548 |
+
atol = 3e-9 # otherwise test fails on Linux aarch64 (see gh-19855)
|
| 549 |
+
|
| 550 |
+
if self.solver == 'lobpcg':
|
| 551 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
| 552 |
+
u, s, vh = svds(A2, k, solver=self.solver, random_state=0)
|
| 553 |
+
else:
|
| 554 |
+
u, s, vh = svds(A2, k, solver=self.solver, random_state=0)
|
| 555 |
+
_check_svds(A, k, u, s, vh, atol=atol)
|
| 556 |
+
|
| 557 |
+
    def test_svd_linop(self):
        """`svds` on a LinearOperator must agree with `svds` on the
        underlying dense array, for which="LM" (default), "SM", and for
        complex input, across several (n, m, k) sizes.

        `CheckingLinearOperator` additionally validates the shapes of the
        vectors passed to matvec/rmatvec during the run.
        """
        solver = self.solver

        nmks = [(6, 7, 3),
                (9, 5, 4),
                (10, 8, 5)]

        def reorder(args):
            # Sort the triplet by ascending singular value so results from
            # different code paths are directly comparable.
            U, s, VH = args
            j = np.argsort(s)
            return U[:, j], s[j], VH[j, :]

        for n, m, k in nmks:
            # Test svds on a LinearOperator.
            A = np.random.RandomState(52).randn(n, m)
            L = CheckingLinearOperator(A)

            if solver == 'propack':
                v0 = np.ones(n)
            else:
                v0 = np.ones(min(A.shape))
            if solver == 'lobpcg':
                with pytest.warns(UserWarning, match="The problem size"):
                    U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver,
                                               random_state=0))
                    U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver,
                                               random_state=0))
            else:
                U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver,
                                           random_state=0))
                U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver,
                                           random_state=0))

            # Compare up to sign; the reconstructed product is sign-free.
            assert_allclose(np.abs(U1), np.abs(U2))
            assert_allclose(s1, s2)
            assert_allclose(np.abs(VH1), np.abs(VH2))
            assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
                            np.dot(U2, np.dot(np.diag(s2), VH2)))

            # Try again with which="SM".
            A = np.random.RandomState(1909).randn(n, m)
            L = CheckingLinearOperator(A)

            # TODO: arpack crashes when v0=v0, which="SM"
            kwargs = {'v0': v0} if solver not in {None, 'arpack'} else {}
            if self.solver == 'lobpcg':
                with pytest.warns(UserWarning, match="The problem size"):
                    U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
                                               random_state=0, **kwargs))
                    U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
                                               random_state=0, **kwargs))
            else:
                U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
                                           random_state=0, **kwargs))
                U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
                                           random_state=0, **kwargs))

            assert_allclose(np.abs(U1), np.abs(U2))
            # +1 shifts values away from zero so the default rtol is
            # meaningful for tiny singular values.
            assert_allclose(s1 + 1, s2 + 1)
            assert_allclose(np.abs(VH1), np.abs(VH2))
            assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
                            np.dot(U2, np.dot(np.diag(s2), VH2)))

            if k < min(n, m) - 1:
                # Complex input and explicit which="LM".
                for (dt, eps) in [(complex, 1e-7), (np.complex64, 3e-3)]:
                    rng = np.random.RandomState(1648)
                    A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
                    L = CheckingLinearOperator(A)

                    if self.solver == 'lobpcg':
                        with pytest.warns(UserWarning,
                                          match="The problem size"):
                            U1, s1, VH1 = reorder(svds(A, k, which="LM",
                                                       solver=solver,
                                                       random_state=0))
                            U2, s2, VH2 = reorder(svds(L, k, which="LM",
                                                       solver=solver,
                                                       random_state=0))
                    else:
                        U1, s1, VH1 = reorder(svds(A, k, which="LM",
                                                   solver=solver,
                                                   random_state=0))
                        U2, s2, VH2 = reorder(svds(L, k, which="LM",
                                                   solver=solver,
                                                   random_state=0))

                    assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
                    assert_allclose(s1, s2, rtol=eps)
                    assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
                    assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
                                    np.dot(U2, np.dot(np.diag(s2), VH2)),
                                    rtol=eps)
|
| 650 |
+
|
| 651 |
+
SHAPES = ((100, 100), (100, 101), (101, 100))
|
| 652 |
+
|
| 653 |
+
@pytest.mark.filterwarnings("ignore:Exited at iteration")
|
| 654 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
| 655 |
+
@pytest.mark.parametrize("shape", SHAPES)
|
| 656 |
+
# ARPACK supports only dtype float, complex, or np.float32
|
| 657 |
+
@pytest.mark.parametrize("dtype", (float, complex, np.float32))
|
| 658 |
+
def test_small_sigma_sparse(self, shape, dtype):
|
| 659 |
+
# https://github.com/scipy/scipy/pull/11829
|
| 660 |
+
solver = self.solver
|
| 661 |
+
# 2do: PROPACK fails orthogonality of singular vectors
|
| 662 |
+
# if dtype == complex and self.solver == 'propack':
|
| 663 |
+
# pytest.skip("PROPACK unsupported for complex dtype")
|
| 664 |
+
rng = np.random.default_rng(0)
|
| 665 |
+
k = 5
|
| 666 |
+
(m, n) = shape
|
| 667 |
+
S = random(m, n, density=0.1, random_state=rng)
|
| 668 |
+
if dtype == complex:
|
| 669 |
+
S = + 1j * random(m, n, density=0.1, random_state=rng)
|
| 670 |
+
e = np.ones(m)
|
| 671 |
+
e[0:5] *= 1e1 ** np.arange(-5, 0, 1)
|
| 672 |
+
S = spdiags(e, 0, m, m) @ S
|
| 673 |
+
S = S.astype(dtype)
|
| 674 |
+
u, s, vh = svds(S, k, which='SM', solver=solver, maxiter=1000,
|
| 675 |
+
random_state=0)
|
| 676 |
+
c_svd = False # partial SVD can be different from full SVD
|
| 677 |
+
_check_svds_n(S, k, u, s, vh, which="SM", check_svd=c_svd, atol=2e-1)
|
| 678 |
+
|
| 679 |
+
# --- Test Edge Cases ---
|
| 680 |
+
# Checks a few edge cases.
|
| 681 |
+
|
| 682 |
+
    @pytest.mark.parametrize("shape", ((6, 5), (5, 5), (5, 6)))
    @pytest.mark.parametrize("dtype", (float, complex))
    def test_svd_LM_ones_matrix(self, shape, dtype):
        """A rank-1 all-ones matrix exercises matrix_rank < k in LM mode:
        only one nonzero singular value (sqrt(n*m)) exists."""
        # Check that svds can deal with matrix_rank less than k in LM mode.
        k = 3
        n, m = shape
        A = np.ones((n, m), dtype=dtype)

        if self.solver == 'lobpcg':
            with pytest.warns(UserWarning, match="The problem size"):
                U, s, VH = svds(A, k, solver=self.solver, random_state=0)
        else:
            U, s, VH = svds(A, k, solver=self.solver, random_state=0)

        _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)

        # Check that the largest singular value is near sqrt(n*m)
        # and the other singular values have been forced to zero.
        assert_allclose(np.max(s), np.sqrt(n*m))
        # +1 shifts the (near-)zero values so the default rtol applies.
        s = np.array(sorted(s)[:-1]) + 1
        z = np.ones_like(s)
        assert_allclose(s, z)
|
| 704 |
+
|
| 705 |
+
    @pytest.mark.filterwarnings("ignore:k >= N - 1",
                                reason="needed to demonstrate #16725")
    @pytest.mark.parametrize("shape", ((3, 4), (4, 4), (4, 3), (4, 2)))
    @pytest.mark.parametrize("dtype", (float, complex))
    def test_zero_matrix(self, shape, dtype):
        """All-zero matrices must not crash `svds` and must return zero
        singular values."""
        # Check that svds can deal with matrices containing only zeros;
        # see https://github.com/scipy/scipy/issues/3452/
        # shape = (4, 2) is included because it is the particular case
        # reported in the issue
        k = 1
        n, m = shape
        A = np.zeros((n, m), dtype=dtype)

        if (self.solver == 'arpack' and dtype is complex
                and k == min(A.shape) - 1):
            pytest.skip("#16725")

        if self.solver == 'propack':
            pytest.skip("PROPACK failures unrelated to PR #16712")

        if self.solver == 'lobpcg':
            with pytest.warns(UserWarning, match="The problem size"):
                U, s, VH = svds(A, k, solver=self.solver, random_state=0)
        else:
            U, s, VH = svds(A, k, solver=self.solver, random_state=0)

        # Check some generic properties of svd.
        _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)

        # Check that the singular values are zero.
        assert_array_equal(s, 0)
|
| 736 |
+
|
| 737 |
+
    @pytest.mark.parametrize("shape", ((20, 20), (20, 21), (21, 20)))
    # ARPACK supports only dtype float, complex, or np.float32
    @pytest.mark.parametrize("dtype", (float, complex, np.float32))
    @pytest.mark.filterwarnings("ignore:Exited",
                                reason="Ignore LOBPCG early exit.")
    def test_small_sigma(self, shape, dtype):
        """Build a matrix with geometrically decaying singular values
        (base 10 for float32, 100 otherwise) and check `svds` still finds
        k strictly positive singular values."""
        rng = np.random.default_rng(179847540)
        A = rng.random(shape).astype(dtype)
        u, _, vh = svd(A, full_matrices=False)
        if dtype == np.float32:
            e = 10.0
        else:
            e = 100.0
        # Replace the singular values by e**0, e**-1, e**-2, ...
        t = e**(-np.arange(len(vh))).astype(dtype)
        A = (u*t).dot(vh)
        k = 4
        u, s, vh = svds(A, k, solver=self.solver, maxiter=100, random_state=0)
        t = np.sum(s > 0)
        assert_equal(t, k)
        # LOBPCG needs larger atol and rtol to pass
        _check_svds_n(A, k, u, s, vh, atol=1e-3, rtol=1e0, check_svd=False)
|
| 758 |
+
|
| 759 |
+
    # ARPACK supports only dtype float, complex, or np.float32
    @pytest.mark.filterwarnings("ignore:The problem size")
    @pytest.mark.parametrize("dtype", (float, complex, np.float32))
    def test_small_sigma2(self, dtype):
        """Singular matrix with a known 4-dimensional null space: the
        which='SM' singular values must be ~0 and the corresponding
        singular vectors must lie in the null space."""
        rng = np.random.default_rng(179847540)
        # create a 10x10 singular matrix with a 4-dim null space
        dim = 4
        size = 10
        x = rng.random((size, size-dim))
        # Last `dim` columns are scaled copies of the first `dim` columns,
        # so they are linearly dependent by construction.
        y = x[:, :dim] * rng.random(dim)
        mat = np.hstack((x, y))
        mat = mat.astype(dtype)

        nz = null_space(mat)
        assert_equal(nz.shape[1], dim)

        # Tolerances atol and rtol adjusted to pass np.float32
        # Use non-sparse svd
        u, s, vh = svd(mat)
        # Singular values are 0:
        assert_allclose(s[-dim:], 0, atol=1e-6, rtol=1e0)
        # Smallest right singular vectors in null space:
        assert_allclose(mat @ vh[-dim:, :].T, 0, atol=1e-6, rtol=1e0)

        # Smallest singular values should be 0
        sp_mat = csc_matrix(mat)
        su, ss, svh = svds(sp_mat, k=dim, which='SM', solver=self.solver,
                           random_state=0)
        # Smallest dim singular values are 0:
        assert_allclose(ss, 0, atol=1e-5, rtol=1e0)
        # Smallest singular vectors via svds in null space:
        n, m = mat.shape
        if n < m:  # else the assert fails with some libraries unclear why
            assert_allclose(sp_mat.transpose() @ su, 0, atol=1e-5, rtol=1e0)
        assert_allclose(sp_mat @ svh.T, 0, atol=1e-5, rtol=1e0)
|
| 794 |
+
|
| 795 |
+
# --- Perform tests with each solver ---
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
class Test_SVDS_once:
    """Solver-independent `svds` checks that need to run only once."""

    @pytest.mark.parametrize("solver", ['ekki', object])
    def test_svds_input_validation_solver(self, solver):
        # An unrecognized `solver` (wrong string or wrong type) must raise.
        message = "solver must be one of"
        with pytest.raises(ValueError, match=message):
            svds(np.ones((3, 4)), k=2, solver=solver)
|
| 804 |
+
|
| 805 |
+
|
| 806 |
+
class Test_SVDS_ARPACK(SVDSCommonTests):
    """Run the common `svds` test suite with the ARPACK solver, plus
    ARPACK-specific validation of the `ncv` parameter."""

    def setup_method(self):
        self.solver = 'arpack'

    @pytest.mark.parametrize("ncv", list(range(-1, 8)) + [4.5, "5"])
    def test_svds_input_validation_ncv_1(self, ncv):
        # Valid `ncv` must satisfy k < ncv < min(M, N); here k=3 on a 6x7
        # matrix, so only 4 and 5 are accepted.
        rng = np.random.default_rng(0)
        A = rng.random((6, 7))
        k = 3
        if ncv in {4, 5}:
            u, s, vh = svds(A, k=k, ncv=ncv, solver=self.solver, random_state=0)
            # partial decomposition, so don't check that u@diag(s)@vh=A;
            # do check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
            _check_svds(A, k, u, s, vh)
        else:
            message = ("`ncv` must be an integer satisfying")
            with pytest.raises(ValueError, match=message):
                svds(A, k=k, ncv=ncv, solver=self.solver)

    def test_svds_input_validation_ncv_2(self):
        # I think the stack trace is reasonable when `ncv` can't be converted
        # to an int.
        message = "int() argument must be a"
        with pytest.raises(TypeError, match=re.escape(message)):
            svds(np.eye(10), ncv=[], solver=self.solver)

        message = "invalid literal for int()"
        with pytest.raises(ValueError, match=message):
            svds(np.eye(10), ncv="hi", solver=self.solver)

    # I can't see a robust relationship between `ncv` and relevant outputs
    # (e.g. accuracy, time), so no test of the parameter.
|
| 839 |
+
|
| 840 |
+
|
| 841 |
+
class Test_SVDS_LOBPCG(SVDSCommonTests):
    """Run the common `svds` test suite with the LOBPCG solver."""

    def setup_method(self):
        self.solver = 'lobpcg'
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
class Test_SVDS_PROPACK(SVDSCommonTests):
    """Run the common `svds` test suite with the PROPACK solver.

    Two inherited tests are overridden as expected failures because
    PROPACK does not return orthonormal singular vectors for zero
    singular values.
    """

    def setup_method(self):
        self.solver = 'propack'

    def test_svd_LM_ones_matrix(self):
        # Overrides the inherited test as an expected failure.
        message = ("PROPACK does not return orthonormal singular vectors "
                   "associated with zero singular values.")
        # There are some other issues with this matrix of all ones, e.g.
        # `which='sm'` and `k=1` returns the largest singular value
        pytest.xfail(message)

    def test_svd_LM_zeros_matrix(self):
        # Expected failure for the all-zeros matrix, same root cause.
        message = ("PROPACK does not return orthonormal singular vectors "
                   "associated with zero singular values.")
        pytest.xfail(message)
|
.venv/Lib/site-packages/scipy/sparse/linalg/_expm_multiply.py
ADDED
|
@@ -0,0 +1,810 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Compute the action of the matrix exponential."""
|
| 2 |
+
from warnings import warn
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
import scipy.linalg
|
| 7 |
+
import scipy.sparse.linalg
|
| 8 |
+
from scipy.linalg._decomp_qr import qr
|
| 9 |
+
from scipy.sparse._sputils import is_pydata_spmatrix
|
| 10 |
+
from scipy.sparse.linalg import aslinearoperator
|
| 11 |
+
from scipy.sparse.linalg._interface import IdentityOperator
|
| 12 |
+
from scipy.sparse.linalg._onenormest import onenormest
|
| 13 |
+
|
| 14 |
+
__all__ = ['expm_multiply']
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _exact_inf_norm(A):
|
| 18 |
+
# A compatibility function which should eventually disappear.
|
| 19 |
+
if scipy.sparse.issparse(A):
|
| 20 |
+
return max(abs(A).sum(axis=1).flat)
|
| 21 |
+
elif is_pydata_spmatrix(A):
|
| 22 |
+
return max(abs(A).sum(axis=1))
|
| 23 |
+
else:
|
| 24 |
+
return np.linalg.norm(A, np.inf)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _exact_1_norm(A):
|
| 28 |
+
# A compatibility function which should eventually disappear.
|
| 29 |
+
if scipy.sparse.issparse(A):
|
| 30 |
+
return max(abs(A).sum(axis=0).flat)
|
| 31 |
+
elif is_pydata_spmatrix(A):
|
| 32 |
+
return max(abs(A).sum(axis=0))
|
| 33 |
+
else:
|
| 34 |
+
return np.linalg.norm(A, 1)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _trace(A):
|
| 38 |
+
# A compatibility function which should eventually disappear.
|
| 39 |
+
if is_pydata_spmatrix(A):
|
| 40 |
+
return A.to_scipy_sparse().trace()
|
| 41 |
+
else:
|
| 42 |
+
return A.trace()
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def traceest(A, m3, seed=None):
    """Estimate ``np.trace(A)`` using ``3*m3`` matrix-vector products.

    Implements the Hutch++ stochastic trace estimator [1]_; without a
    ``seed`` the result is not deterministic.

    Parameters
    ----------
    A : LinearOperator
        Linear operator whose trace will be estimated. Has to be square.
    m3 : int
        Number of matrix-vector products divided by 3 used to estimate the
        trace.
    seed : optional
        Seed for `numpy.random.default_rng`.
        Can be provided to obtain deterministic results.

    Returns
    -------
    trace : LinearOperator.dtype
        Estimate of the trace

    References
    ----------
    .. [1] Meyer, Raphael A., Cameron Musco, Christopher Musco, and David P.
           Woodruff. "Hutch++: Optimal Stochastic Trace Estimation." In
           Symposium on Simplicity in Algorithms (SOSA), pp. 142-155.
           Society for Industrial and Applied Mathematics, 2021
           https://doi.org/10.1137/1.9781611976496.16
    """
    rng = np.random.default_rng(seed)
    if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]:
        raise ValueError("Expected A to be like a square matrix.")
    n = A.shape[-1]
    # Random-sign probes; an orthonormal basis of A @ S captures the
    # dominant (low-rank) part of the trace exactly.
    S = rng.choice([-1.0, +1.0], [n, m3])
    Q, _ = qr(A.matmat(S), overwrite_a=True, mode='economic')
    trQAQ = np.trace(Q.conj().T @ A.matmat(Q))
    # Plain Hutchinson estimate on the complement of span(Q).
    G = rng.choice([-1, +1], [n, m3])
    G_defl = G - Q @ (Q.conj().T @ G)
    trGAG = np.trace(G_defl.conj().T @ A.matmat(G_defl))
    return trQAQ + trGAG / m3
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _ident_like(A):
|
| 93 |
+
# A compatibility function which should eventually disappear.
|
| 94 |
+
if scipy.sparse.issparse(A):
|
| 95 |
+
# Creates a sparse matrix in dia format
|
| 96 |
+
out = scipy.sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
|
| 97 |
+
if isinstance(A, scipy.sparse.spmatrix):
|
| 98 |
+
return out.asformat(A.format)
|
| 99 |
+
return scipy.sparse.dia_array(out).asformat(A.format)
|
| 100 |
+
elif is_pydata_spmatrix(A):
|
| 101 |
+
import sparse
|
| 102 |
+
return sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
|
| 103 |
+
elif isinstance(A, scipy.sparse.linalg.LinearOperator):
|
| 104 |
+
return IdentityOperator(A.shape, dtype=A.dtype)
|
| 105 |
+
else:
|
| 106 |
+
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def expm_multiply(A, B, start=None, stop=None, num=None,
                  endpoint=None, traceA=None):
    """
    Compute the action of the matrix exponential of A on B.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix or vector to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    traceA : scalar, optional
        Trace of `A`. If not given the trace is estimated for linear operators,
        or calculated exactly for sparse matrices. It is used to precondition
        `A`, thus an approximate trace is acceptable.
        For linear operators, `traceA` should be provided to ensure performance
        as the estimation is not guaranteed to be reliable for all cases.

        .. versionadded:: 1.9.0

    Returns
    -------
    expm_A_B : ndarray
        The result of the action :math:`e^{t_k A} B`.

    Warns
    -----
    UserWarning
        If `A` is a linear operator and ``traceA=None`` (default).

    Notes
    -----
    The optional arguments defining the sequence of evenly spaced time points
    are compatible with the arguments of `numpy.linspace`.

    The output ndarray shape is somewhat complicated so I explain it here.
    The ndim of the output could be either 1, 2, or 3.
    It would be 1 if you are computing the expm action on a single vector
    at a single time point.
    It would be 2 if you are computing the expm action on a vector
    at multiple time points, or if you are computing the expm action
    on a matrix at a single time point.
    It would be 3 if you want the action on a matrix with multiple
    columns at multiple time points.
    If multiple time points are requested, expm_A_B[0] will always
    be the action of the expm at the first time point,
    regardless of whether the action is on a vector or a matrix.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
           "Computing the Action of the Matrix Exponential,
           with an Application to Exponential Integrators."
           SIAM Journal on Scientific Computing,
           33 (2). pp. 488-511. ISSN 1064-8275
           http://eprints.ma.man.ac.uk/1591/

    .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
           "Computing Matrix Functions."
           Acta Numerica,
           19. 159-208. ISSN 0962-4929
           http://eprints.ma.man.ac.uk/1451/

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm, expm_multiply
    >>> A = csc_matrix([[1, 0], [0, 1]])
    >>> A.toarray()
    array([[1, 0],
           [0, 1]], dtype=int64)
    >>> B = np.array([np.exp(-1.), np.exp(-2.)])
    >>> B
    array([ 0.36787944,  0.13533528])
    >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
    array([[ 1.        ,  0.36787944],
           [ 1.64872127,  0.60653066],
           [ 2.71828183,  1.        ]])
    >>> expm(A).dot(B)         # Verify 1st timestep
    array([ 1.        ,  0.36787944])
    >>> expm(1.5*A).dot(B)     # Verify 2nd timestep
    array([ 1.64872127,  0.60653066])
    >>> expm(2*A).dot(B)       # Verify 3rd timestep
    array([ 2.71828183,  1.        ])
    """
    if all(arg is None for arg in (start, stop, num, endpoint)):
        # No time-grid arguments given: single time point t=1.0.
        X = _expm_multiply_simple(A, B, traceA=traceA)
    else:
        # Evenly spaced time points described by linspace-like arguments;
        # the interval routine's status flag is intentionally discarded.
        X, status = _expm_multiply_interval(A, B, start, stop, num,
                                            endpoint, traceA=traceA)
    return X
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def _expm_multiply_simple(A, B, t=1.0, traceA=None, balance=False):
    """
    Compute the action of the matrix exponential at a single time point.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    t : float
        A time point.
    traceA : scalar, optional
        Trace of `A`. If not given the trace is estimated for linear
        operators, or calculated exactly for sparse matrices. It is used
        to precondition `A`, thus an approximate trace is acceptable.
    balance : bool
        Indicates whether or not to apply balancing.

    Returns
    -------
    F : ndarray
        :math:`e^{t A} B`

    Notes
    -----
    This is algorithm (3.2) in Al-Mohy and Higham (2011).

    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('shapes of matrices A {} and B {} are incompatible'
                         .format(A.shape, B.shape))
    ident = _ident_like(A)
    use_operator_path = isinstance(A, scipy.sparse.linalg.LinearOperator)
    n = A.shape[0]

    # n0 is the number of columns of B (1 for a vector).
    ndim_B = len(B.shape)
    if ndim_B == 1:
        n0 = 1
    elif ndim_B == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')

    tol = 2**-53  # double-precision unit roundoff

    if traceA is None:
        if use_operator_path:
            warn("Trace of LinearOperator not available, it will be estimated."
                 " Provide `traceA` to ensure performance.", stacklevel=3)
        # m3=1 is a bit of an arbitrary choice: a more accurate trace
        # (larger m3) might speed up the exponential calculation, but the
        # trace estimation itself is then more costly.
        if use_operator_path:
            traceA = traceest(A, m3=1)
        else:
            traceA = _trace(A)

    # Precondition A by shifting out the mean eigenvalue mu.
    mu = traceA / float(n)
    A = A - mu * ident

    if use_operator_path:
        A_1_norm = onenormest(A)
    else:
        A_1_norm = _exact_1_norm(A)

    if t * A_1_norm == 0:
        # The shifted operator is (numerically) zero; no Taylor terms needed.
        m_star, s = 0, 1
    else:
        ell = 2
        norm_info = LazyOperatorNormInfo(t * A, A_1_norm=t * A_1_norm, ell=ell)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
    """
    Core Taylor-series loop for the single-time-point expm action.

    Repeats s times a truncated Taylor expansion of exp(t*A/s) applied to
    the running matrix, undoing the mu preconditioning shift through the
    scalar factor eta at each stage.
    """
    if balance:
        raise NotImplementedError
    if tol is None:
        tol = 2 ** -53  # double-precision unit roundoff
    eta = np.exp(t * mu / float(s))
    F = B
    for _ in range(s):
        prev_norm = _exact_inf_norm(B)
        for j in range(m_star):
            B = (t / float(s * (j + 1))) * A.dot(B)
            term_norm = _exact_inf_norm(B)
            F = F + B
            # Stop early once two consecutive Taylor terms are negligible
            # relative to the accumulated result.
            if prev_norm + term_norm <= tol * _exact_inf_norm(F):
                break
            prev_norm = term_norm
        F = eta * F
        B = F
    return F
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
# This table helps to compute bounds.
|
| 307 |
+
# They seem to have been difficult to calculate, involving symbolic
|
| 308 |
+
# manipulation of equations, followed by numerical root finding.
|
| 309 |
+
_theta = {
|
| 310 |
+
# The first 30 values are from table A.3 of Computing Matrix Functions.
|
| 311 |
+
1: 2.29e-16,
|
| 312 |
+
2: 2.58e-8,
|
| 313 |
+
3: 1.39e-5,
|
| 314 |
+
4: 3.40e-4,
|
| 315 |
+
5: 2.40e-3,
|
| 316 |
+
6: 9.07e-3,
|
| 317 |
+
7: 2.38e-2,
|
| 318 |
+
8: 5.00e-2,
|
| 319 |
+
9: 8.96e-2,
|
| 320 |
+
10: 1.44e-1,
|
| 321 |
+
# 11
|
| 322 |
+
11: 2.14e-1,
|
| 323 |
+
12: 3.00e-1,
|
| 324 |
+
13: 4.00e-1,
|
| 325 |
+
14: 5.14e-1,
|
| 326 |
+
15: 6.41e-1,
|
| 327 |
+
16: 7.81e-1,
|
| 328 |
+
17: 9.31e-1,
|
| 329 |
+
18: 1.09,
|
| 330 |
+
19: 1.26,
|
| 331 |
+
20: 1.44,
|
| 332 |
+
# 21
|
| 333 |
+
21: 1.62,
|
| 334 |
+
22: 1.82,
|
| 335 |
+
23: 2.01,
|
| 336 |
+
24: 2.22,
|
| 337 |
+
25: 2.43,
|
| 338 |
+
26: 2.64,
|
| 339 |
+
27: 2.86,
|
| 340 |
+
28: 3.08,
|
| 341 |
+
29: 3.31,
|
| 342 |
+
30: 3.54,
|
| 343 |
+
# The rest are from table 3.1 of
|
| 344 |
+
# Computing the Action of the Matrix Exponential.
|
| 345 |
+
35: 4.7,
|
| 346 |
+
40: 6.0,
|
| 347 |
+
45: 7.2,
|
| 348 |
+
50: 8.5,
|
| 349 |
+
55: 9.9,
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _onenormest_matrix_power(A, p,
|
| 354 |
+
t=2, itmax=5, compute_v=False, compute_w=False):
|
| 355 |
+
"""
|
| 356 |
+
Efficiently estimate the 1-norm of A^p.
|
| 357 |
+
|
| 358 |
+
Parameters
|
| 359 |
+
----------
|
| 360 |
+
A : ndarray
|
| 361 |
+
Matrix whose 1-norm of a power is to be computed.
|
| 362 |
+
p : int
|
| 363 |
+
Non-negative integer power.
|
| 364 |
+
t : int, optional
|
| 365 |
+
A positive parameter controlling the tradeoff between
|
| 366 |
+
accuracy versus time and memory usage.
|
| 367 |
+
Larger values take longer and use more memory
|
| 368 |
+
but give more accurate output.
|
| 369 |
+
itmax : int, optional
|
| 370 |
+
Use at most this many iterations.
|
| 371 |
+
compute_v : bool, optional
|
| 372 |
+
Request a norm-maximizing linear operator input vector if True.
|
| 373 |
+
compute_w : bool, optional
|
| 374 |
+
Request a norm-maximizing linear operator output vector if True.
|
| 375 |
+
|
| 376 |
+
Returns
|
| 377 |
+
-------
|
| 378 |
+
est : float
|
| 379 |
+
An underestimate of the 1-norm of the sparse matrix.
|
| 380 |
+
v : ndarray, optional
|
| 381 |
+
The vector such that ||Av||_1 == est*||v||_1.
|
| 382 |
+
It can be thought of as an input to the linear operator
|
| 383 |
+
that gives an output with particularly large norm.
|
| 384 |
+
w : ndarray, optional
|
| 385 |
+
The vector Av which has relatively large 1-norm.
|
| 386 |
+
It can be thought of as an output of the linear operator
|
| 387 |
+
that is relatively large in norm compared to the input.
|
| 388 |
+
|
| 389 |
+
"""
|
| 390 |
+
#XXX Eventually turn this into an API function in the _onenormest module,
|
| 391 |
+
#XXX and remove its underscore,
|
| 392 |
+
#XXX but wait until expm_multiply goes into scipy.
|
| 393 |
+
from scipy.sparse.linalg._onenormest import onenormest
|
| 394 |
+
return onenormest(aslinearoperator(A) ** p)
|
| 395 |
+
|
| 396 |
+
class LazyOperatorNormInfo:
    """
    Lazily computed norm information about an operator.

    Caches the exact 1-norm of the operator together with estimates of the
    1-norms of its powers, following the notation of Al-Mohy and Higham,
    "Computing the Action of the Matrix Exponential" (2011). The class is
    specialized enough that it is unlikely to be of general interest
    outside of this module.
    """

    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
        """
        Record the operator together with any norm information already known.

        Parameters
        ----------
        A : linear operator
            The operator of interest.
        A_1_norm : float, optional
            The exact 1-norm of A, if already computed.
        ell : int, optional
            A technical parameter controlling norm estimation quality.
        scale : int, optional
            If specified, return the norms of scale*A instead of A.
        """
        self._A = A
        self._A_1_norm = A_1_norm
        self._ell = ell
        self._d = {}  # cache: power p -> estimate of ||A^p||_1 ** (1/p)
        self._scale = scale

    def set_scale(self, scale):
        """Set the scale parameter."""
        self._scale = scale

    def onenorm(self):
        """Return the exact 1-norm, computing and caching it on first use."""
        if self._A_1_norm is None:
            self._A_1_norm = _exact_1_norm(self._A)
        return self._scale * self._A_1_norm

    def d(self, p):
        """
        Lazily estimate :math:`d_p(A) ~= || A^p ||^(1/p)` where
        :math:`||.||` is the 1-norm.
        """
        if p not in self._d:
            estimate = _onenormest_matrix_power(self._A, p, self._ell)
            self._d[p] = estimate ** (1.0 / p)
        return self._scale * self._d[p]

    def alpha(self, p):
        """Lazily compute max(d(p), d(p+1))."""
        return max(self.d(p), self.d(p + 1))
|
| 458 |
+
|
| 459 |
+
def _compute_cost_div_m(m, p, norm_info):
    """
    Evaluate equation (3.10) of Al-Mohy and Higham (2011).

    Measures cost in terms of the number of required matrix products,
    divided by m.

    Parameters
    ----------
    m : int
        A valid key of _theta.
    p : int
        A matrix power.
    norm_info : LazyOperatorNormInfo
        Information about 1-norms of related operators.

    Returns
    -------
    cost_div_m : int
        Required number of matrix products divided by m.
    """
    ratio = norm_info.alpha(p) / _theta[m]
    return int(np.ceil(ratio))
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
def _compute_p_max(m_max):
|
| 485 |
+
"""
|
| 486 |
+
Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
|
| 487 |
+
|
| 488 |
+
Do this in a slightly dumb way, but safe and not too slow.
|
| 489 |
+
|
| 490 |
+
Parameters
|
| 491 |
+
----------
|
| 492 |
+
m_max : int
|
| 493 |
+
A count related to bounds.
|
| 494 |
+
|
| 495 |
+
"""
|
| 496 |
+
sqrt_m_max = np.sqrt(m_max)
|
| 497 |
+
p_low = int(np.floor(sqrt_m_max))
|
| 498 |
+
p_high = int(np.ceil(sqrt_m_max + 1))
|
| 499 |
+
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
    """
    Choose the Taylor degree m and scaling s for the _expm_multiply_*
    functions.

    Parameters
    ----------
    norm_info : LazyOperatorNormInfo
        Information about norms of certain linear operators of interest.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    tol : float
        Expected to be
        :math:`2^{-24}` for single precision or
        :math:`2^{-53}` for double precision.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    best_m : int
        Related to bounds for error control.
    best_s : int
        Amount of scaling.

    Notes
    -----
    This is code fragment (3.1) in Al-Mohy and Higham (2011).
    The discussion of default values for m_max and ell
    is given between the definitions of equation (3.11)
    and the definition of equation (3.12).
    """
    if ell < 1:
        raise ValueError('expected ell to be a positive integer')
    best_m = None
    best_s = None
    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
        # Small-norm regime: derive (m, s) directly from the exact 1-norm,
        # minimizing the product m*s over all tabulated degrees.
        for m, theta in _theta.items():
            s = int(np.ceil(norm_info.onenorm() / theta))
            if best_m is None or m * s < best_m * best_s:
                best_m, best_s = m, s
    else:
        # Equation (3.11): search over powers p and admissible degrees m,
        # using estimated norms of operator powers.
        for p in range(2, _compute_p_max(m_max) + 1):
            for m in range(p * (p - 1) - 1, m_max + 1):
                if m not in _theta:
                    continue
                s = _compute_cost_div_m(m, p, norm_info)
                if best_m is None or m * s < best_m * best_s:
                    best_m, best_s = m, s
        best_s = max(best_s, 1)
    return best_m, best_s
|
| 558 |
+
|
| 559 |
+
|
| 560 |
+
def _condition_3_13(A_1_norm, n0, m_max, ell):
    """
    Evaluate condition (3.13) of Al-Mohy and Higham (2011).

    Parameters
    ----------
    A_1_norm : float
        The precomputed 1-norm of A.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    value : bool
        Indicates whether or not the condition has been met.
    """
    # First factor: the right-hand side of equation (3.12).
    p_max = _compute_p_max(m_max)
    scale_term = 2 * ell * p_max * (p_max + 3)

    # Second factor, then the comparison of condition (3.13).
    norm_term = _theta[m_max] / float(n0 * m_max)
    return A_1_norm <= scale_term * norm_term
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
def _expm_multiply_interval(A, B, start=None, stop=None, num=None,
                            endpoint=None, traceA=None, balance=False,
                            status_only=False):
    """
    Compute the action of the matrix exponential at multiple time points.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    traceA : scalar, optional
        Trace of `A`. If not given the trace is estimated for linear operators,
        or calculated exactly for sparse matrices. It is used to precondition
        `A`, thus an approximate trace is acceptable.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    balance : bool
        Indicates whether or not to apply balancing.
    status_only : bool
        A flag that is set to True for some debugging and testing operations.

    Returns
    -------
    F : ndarray
        :math:`e^{t_k A} B`
    status : int
        An integer status for testing and debugging.

    Notes
    -----
    This is algorithm (5.2) in Al-Mohy and Higham (2011).

    There seems to be a typo, where line 15 of the algorithm should be
    moved to line 6.5 (between lines 6 and 7).
    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('shapes of matrices A {} and B {} are incompatible'
                         .format(A.shape, B.shape))
    ident = _ident_like(A)
    is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
    n = A.shape[0]
    # n0 is the number of columns of B (1 for a vector).
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    u_d = 2**-53  # double-precision unit roundoff
    tol = u_d
    if traceA is None:
        if is_linear_operator:
            warn("Trace of LinearOperator not available, it will be estimated."
                 " Provide `traceA` to ensure performance.", stacklevel=3)
        # m3=5 is a bit of an arbitrary choice: a more accurate trace
        # (larger m3) might speed up the exponential calculation, but trace
        # estimation is also costly; an educated guess would need to
        # consider the number of time points.
        traceA = traceest(A, m3=5) if is_linear_operator else _trace(A)
    # Precondition A by shifting out the mean eigenvalue mu.
    mu = traceA / float(n)

    # Get the linspace samples, attempting to preserve the linspace defaults.
    linspace_kwargs = {'retstep': True}
    if num is not None:
        linspace_kwargs['num'] = num
    if endpoint is not None:
        linspace_kwargs['endpoint'] = endpoint
    samples, step = np.linspace(start, stop, **linspace_kwargs)

    # Convert the linspace output to the notation used by the publication.
    nsamples = len(samples)
    if nsamples < 2:
        raise ValueError('at least two time points are required')
    q = nsamples - 1
    h = step
    t_0 = samples[0]
    t_q = samples[q]

    # Define the output ndarray.
    # Use an ndim=3 shape, such that the last two indices
    # are the ones that may be involved in level 3 BLAS operations.
    X_shape = (nsamples,) + B.shape
    X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
    t = t_q - t_0
    A = A - mu * ident
    A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
    ell = 2
    norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
    if t*A_1_norm == 0:
        # The shifted operator is (numerically) zero over the whole span.
        m_star, s = 0, 1
    else:
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)

    # Compute the expm action up to the initial time point.
    X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)

    # Compute the expm action at the rest of the time points.
    # The three cases correspond to the three branches of algorithm (5.2).
    if q <= s:
        if status_only:
            return 0
        else:
            return _expm_multiply_interval_core_0(A, X,
                    h, mu, q, norm_info, tol, ell,n0)
    elif not (q % s):
        if status_only:
            return 1
        else:
            return _expm_multiply_interval_core_1(A, X,
                    h, mu, m_star, s, q, tol)
    elif (q % s):
        if status_only:
            return 2
        else:
            return _expm_multiply_interval_core_2(A, X,
                    h, mu, m_star, s, q, tol)
    else:
        # Unreachable: the three branches above are exhaustive.
        raise Exception('internal error')
|
| 727 |
+
|
| 728 |
+
|
| 729 |
+
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
    """
    Fill in the time-point results for the case q <= s.

    Each step advances the previous result by the step size h using the
    single-point core, with (m_star, s) re-derived for sub-intervals of
    size t/q.
    """
    if norm_info.onenorm() == 0:
        m_star, s = 0, 1
    else:
        # Temporarily rescale the cached norms so that _fragment_3_1 sees
        # the operator scaled for steps of size t/q, then restore.
        norm_info.set_scale(1. / q)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
        norm_info.set_scale(1)

    for step_index in range(q):
        X[step_index + 1] = _expm_multiply_simple_core(
            A, X[step_index], h, mu, m_star, s)
    return X, 0
|
| 746 |
+
|
| 747 |
+
|
| 748 |
+
def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s == 0.

    Advances the solution across d = q // s steps per outer block. The
    cached terms ``K[p] = (h*A)^p Z / p!`` depend only on the block start
    ``Z``, not on the step index ``k``, so each is computed once per block
    and reused for subsequent steps.

    Parameters
    ----------
    A : transposable linear operator
        The (mu-shifted) operator.
    X : ndarray
        Output array; X[0] holds the value at the initial time point and
        the remaining rows are filled in place.
    h : scalar
        Time step between consecutive samples.
    mu : scalar
        The preconditioning shift, undone via the exp(k*h*mu) factor.
    m_star, s : int
        Taylor degree and scaling from _fragment_3_1.
    q : int
        Number of steps (q == s * d).
    tol : float
        Relative tolerance for early termination of the Taylor sum.

    Returns
    -------
    X : ndarray
        The filled output array.
    status : int
        Always 1 (branch identifier for testing/debugging).
    """
    d = q // s
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(s):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        for k in range(1, d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p > high_p:
                    # K[p] is independent of k, so compute it only once per
                    # outer block and record the highest power available.
                    # (Previously high_p was never advanced, so these
                    # loop-invariant products were recomputed every step.)
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                # Stop once consecutive terms are negligible relative to F.
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            # Undo the mu shift for this time point.
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 1
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s > 0.

    Advances the solution in j full blocks of d = q // s steps plus one
    final partial block of r = q - d*j steps, caching the terms
    ``K[p] = (h*A)^p Z / p!`` (which depend only on the block start Z,
    not on the step index k) so each is computed once per block.
    """
    d = q // s
    j = q // d
    r = q - d * j  # remaining steps in the final (partial) block
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(j + 1):
        Z = X[i*d]
        K[0] = Z
        high_p = 0  # highest power p for which K[p] is already computed
        if i < j:
            effective_d = d
        else:
            effective_d = r
        for k in range(1, effective_d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p == high_p + 1:
                    # K[p] is independent of k; compute once per block.
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                # Stop once consecutive terms are negligible relative to F.
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            # Undo the mu shift for this time point.
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 2
|
.venv/Lib/site-packages/scipy/sparse/linalg/_interface.py
ADDED
|
@@ -0,0 +1,896 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Abstract linear algebra library.
|
| 2 |
+
|
| 3 |
+
This module defines a class hierarchy that implements a kind of "lazy"
|
| 4 |
+
matrix representation, called the ``LinearOperator``. It can be used to do
|
| 5 |
+
linear algebra with extremely large sparse or structured matrices, without
|
| 6 |
+
representing those explicitly in memory. Such matrices can be added,
|
| 7 |
+
multiplied, transposed, etc.
|
| 8 |
+
|
| 9 |
+
As a motivating example, suppose you want have a matrix where almost all of
|
| 10 |
+
the elements have the value one. The standard sparse matrix representation
|
| 11 |
+
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
|
| 12 |
+
able to represent such matrices efficiently. First, we need a compact way to
|
| 13 |
+
represent an all-ones matrix::
|
| 14 |
+
|
| 15 |
+
>>> import numpy as np
|
| 16 |
+
>>> from scipy.sparse.linalg._interface import LinearOperator
|
| 17 |
+
>>> class Ones(LinearOperator):
|
| 18 |
+
... def __init__(self, shape):
|
| 19 |
+
... super().__init__(dtype=None, shape=shape)
|
| 20 |
+
... def _matvec(self, x):
|
| 21 |
+
... return np.repeat(x.sum(), self.shape[0])
|
| 22 |
+
|
| 23 |
+
Instances of this class emulate ``np.ones(shape)``, but using a constant
|
| 24 |
+
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
|
| 25 |
+
how this linear operator multiplies with (operates on) a vector. We can now
|
| 26 |
+
add this operator to a sparse matrix that stores only offsets from one::
|
| 27 |
+
|
| 28 |
+
>>> from scipy.sparse.linalg._interface import aslinearoperator
|
| 29 |
+
>>> from scipy.sparse import csr_matrix
|
| 30 |
+
>>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
|
| 31 |
+
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
|
| 32 |
+
>>> A.dot([1, 2, 3])
|
| 33 |
+
array([13, 4, 15])
|
| 34 |
+
|
| 35 |
+
The result is the same as that given by its dense, explicitly-stored
|
| 36 |
+
counterpart::
|
| 37 |
+
|
| 38 |
+
>>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
|
| 39 |
+
array([13, 4, 15])
|
| 40 |
+
|
| 41 |
+
Several algorithms in the ``scipy.sparse`` library are able to operate on
|
| 42 |
+
``LinearOperator`` instances.
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
import warnings
|
| 46 |
+
|
| 47 |
+
import numpy as np
|
| 48 |
+
|
| 49 |
+
from scipy.sparse import issparse
|
| 50 |
+
from scipy.sparse._sputils import isshape, isintlike, asmatrix, is_pydata_spmatrix
|
| 51 |
+
|
| 52 |
+
__all__ = ['LinearOperator', 'aslinearoperator']
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class LinearOperator:
    """Common interface for performing matrix vector products

    Many iterative methods (e.g. cg, gmres) do not need to know the
    individual entries of a matrix to solve a linear system A*x=b.
    Such solvers only require the computation of matrix vector
    products, A*v where v is a dense vector. This class serves as
    an abstract interface between iterative solvers and matrix-like
    objects.

    To construct a concrete LinearOperator, either pass appropriate
    callables to the constructor of this class, or subclass it.

    A subclass must implement either one of the methods ``_matvec``
    and ``_matmat``, and the attributes/properties ``shape`` (pair of
    integers) and ``dtype`` (may be None). It may call the ``__init__``
    on this class to have these attributes validated. Implementing
    ``_matvec`` automatically implements ``_matmat`` (using a naive
    algorithm) and vice-versa.

    Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
    to implement the Hermitian adjoint (conjugate transpose). As with
    ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
    ``_adjoint`` implements the other automatically. Implementing
    ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
    backwards compatibility.

    Parameters
    ----------
    shape : tuple
        Matrix dimensions (M, N).
    matvec : callable f(v)
        Returns A * v.
    rmatvec : callable f(v)
        Returns A^H * v, where A^H is the conjugate transpose of A.
    matmat : callable f(V)
        Returns A * V, where V is a dense matrix with dimensions (N, K).
    dtype : dtype
        Data type of the matrix.
    rmatmat : callable f(V)
        Returns A^H * V, where V is a dense matrix with dimensions (M, K).

    Attributes
    ----------
    args : tuple
        For linear operators describing products etc. of other linear
        operators, the operands of the binary operation.
    ndim : int
        Number of dimensions (this is always 2)

    See Also
    --------
    aslinearoperator : Construct LinearOperators

    Notes
    -----
    The user-defined matvec() function must properly handle the case
    where v has shape (N,) as well as the (N,1) case. The shape of
    the return type is handled internally by LinearOperator.

    LinearOperator instances can also be multiplied, added with each
    other and exponentiated, all lazily: the result of these operations
    is always a new, composite LinearOperator, that defers linear
    operations to the original operators and combines the results.

    More details regarding how to subclass a LinearOperator and several
    examples of concrete LinearOperator instances can be found in the
    external project `PyLops <https://pylops.readthedocs.io>`_.


    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import LinearOperator
    >>> def mv(v):
    ...     return np.array([2*v[0], 3*v[1]])
    ...
    >>> A = LinearOperator((2,2), matvec=mv)
    >>> A
    <2x2 _CustomLinearOperator with dtype=float64>
    >>> A.matvec(np.ones(2))
    array([ 2.,  3.])
    >>> A * np.ones(2)
    array([ 2.,  3.])

    """

    ndim = 2
    # Necessary for right matmul with numpy arrays: otherwise numpy would
    # try to broadcast the operator elementwise instead of deferring to
    # our __rmatmul__/__rmul__.
    __array_ufunc__ = None

    def __new__(cls, *args, **kwargs):
        if cls is LinearOperator:
            # Instantiating the base class directly acts as a factory for
            # _CustomLinearOperator (callables passed via __init__).
            return super().__new__(_CustomLinearOperator)
        else:
            obj = super().__new__(cls)

            # A subclass that overrides neither _matvec nor _matmat can
            # never perform a product; warn early at construction time.
            if (type(obj)._matvec == LinearOperator._matvec
                    and type(obj)._matmat == LinearOperator._matmat):
                warnings.warn("LinearOperator subclass should implement"
                              " at least one of _matvec and _matmat.",
                              category=RuntimeWarning, stacklevel=2)

            return obj

    def __init__(self, dtype, shape):
        """Initialize this LinearOperator.

        To be called by subclasses. ``dtype`` may be None; ``shape`` should
        be convertible to a length-2 tuple.
        """
        if dtype is not None:
            dtype = np.dtype(dtype)

        shape = tuple(shape)
        if not isshape(shape):
            raise ValueError(f"invalid shape {shape!r} (must be 2-d)")

        self.dtype = dtype
        self.shape = shape

    def _init_dtype(self):
        """Called from subclasses at the end of the __init__ routine.

        When no dtype was given, probe it by applying the operator to a
        zero vector.
        """
        if self.dtype is None:
            v = np.zeros(self.shape[-1])
            self.dtype = np.asarray(self.matvec(v)).dtype

    def _matmat(self, X):
        """Default matrix-matrix multiplication handler.

        Falls back on the user-defined _matvec method, so defining that will
        define matrix multiplication (though in a very suboptimal way).
        """
        # One matvec per column of X, stacked back into a 2-d result.
        return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])

    def _matvec(self, x):
        """Default matrix-vector multiplication handler.

        If self is a linear operator of shape (M, N), then this method will
        be called on a shape (N,) or (N, 1) ndarray, and should return a
        shape (M,) or (M, 1) ndarray.

        This default implementation falls back on _matmat, so defining that
        will define matrix-vector multiplication as well.
        """
        return self.matmat(x.reshape(-1, 1))

    def matvec(self, x):
        """Matrix-vector multiplication.

        Performs the operation y=A*x where A is an MxN linear
        operator and x is a column vector or 1-d array.

        Parameters
        ----------
        x : {matrix, ndarray}
            An array with shape (N,) or (N,1).

        Returns
        -------
        y : {matrix, ndarray}
            A matrix or ndarray with shape (M,) or (M,1) depending
            on the type and shape of the x argument.

        Notes
        -----
        This matvec wraps the user-specified matvec routine or overridden
        _matvec method to ensure that y has the correct shape and type.

        """

        x = np.asanyarray(x)

        M,N = self.shape

        if x.shape != (N,) and x.shape != (N,1):
            raise ValueError('dimension mismatch')

        y = self._matvec(x)

        # Preserve np.matrix outputs for np.matrix inputs; plain ndarray
        # otherwise.
        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)

        # Normalize the result shape to match the input's dimensionality.
        if x.ndim == 1:
            y = y.reshape(M)
        elif x.ndim == 2:
            y = y.reshape(M,1)
        else:
            raise ValueError('invalid shape returned by user-defined matvec()')

        return y

    def rmatvec(self, x):
        """Adjoint matrix-vector multiplication.

        Performs the operation y = A^H * x where A is an MxN linear
        operator and x is a column vector or 1-d array.

        Parameters
        ----------
        x : {matrix, ndarray}
            An array with shape (M,) or (M,1).

        Returns
        -------
        y : {matrix, ndarray}
            A matrix or ndarray with shape (N,) or (N,1) depending
            on the type and shape of the x argument.

        Notes
        -----
        This rmatvec wraps the user-specified rmatvec routine or overridden
        _rmatvec method to ensure that y has the correct shape and type.

        """

        x = np.asanyarray(x)

        M,N = self.shape

        if x.shape != (M,) and x.shape != (M,1):
            raise ValueError('dimension mismatch')

        y = self._rmatvec(x)

        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)

        if x.ndim == 1:
            y = y.reshape(N)
        elif x.ndim == 2:
            y = y.reshape(N,1)
        else:
            raise ValueError('invalid shape returned by user-defined rmatvec()')

        return y

    def _rmatvec(self, x):
        """Default implementation of _rmatvec; defers to adjoint."""
        if type(self)._adjoint == LinearOperator._adjoint:
            # _adjoint not overridden, prevent infinite recursion
            # (the default _adjoint would call back into _rmatvec).
            raise NotImplementedError
        else:
            return self.H.matvec(x)

    def matmat(self, X):
        """Matrix-matrix multiplication.

        Performs the operation y=A*X where A is an MxN linear
        operator and X dense N*K matrix or ndarray.

        Parameters
        ----------
        X : {matrix, ndarray}
            An array with shape (N,K).

        Returns
        -------
        Y : {matrix, ndarray}
            A matrix or ndarray with shape (M,K) depending on
            the type of the X argument.

        Notes
        -----
        This matmat wraps any user-specified matmat routine or overridden
        _matmat method to ensure that y has the correct type.

        """
        if not (issparse(X) or is_pydata_spmatrix(X)):
            X = np.asanyarray(X)

        if X.ndim != 2:
            raise ValueError(f'expected 2-d ndarray or matrix, not {X.ndim}-d')

        if X.shape[0] != self.shape[1]:
            raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}')

        try:
            Y = self._matmat(X)
        except Exception as e:
            # Sparse operands usually fail inside _matmat; give a more
            # actionable error message in that case.
            if issparse(X) or is_pydata_spmatrix(X):
                raise TypeError(
                    "Unable to multiply a LinearOperator with a sparse matrix."
                    " Wrap the matrix in aslinearoperator first."
                ) from e
            raise

        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)

        return Y

    def rmatmat(self, X):
        """Adjoint matrix-matrix multiplication.

        Performs the operation y = A^H * x where A is an MxN linear
        operator and x is a column vector or 1-d array, or 2-d array.
        The default implementation defers to the adjoint.

        Parameters
        ----------
        X : {matrix, ndarray}
            A matrix or 2D array.

        Returns
        -------
        Y : {matrix, ndarray}
            A matrix or 2D array depending on the type of the input.

        Notes
        -----
        This rmatmat wraps the user-specified rmatmat routine.

        """
        if not (issparse(X) or is_pydata_spmatrix(X)):
            X = np.asanyarray(X)

        if X.ndim != 2:
            raise ValueError('expected 2-d ndarray or matrix, not %d-d'
                             % X.ndim)

        if X.shape[0] != self.shape[0]:
            raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}')

        try:
            Y = self._rmatmat(X)
        except Exception as e:
            if issparse(X) or is_pydata_spmatrix(X):
                raise TypeError(
                    "Unable to multiply a LinearOperator with a sparse matrix."
                    " Wrap the matrix in aslinearoperator() first."
                ) from e
            raise

        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)
        return Y

    def _rmatmat(self, X):
        """Default implementation of _rmatmat defers to rmatvec or adjoint."""
        if type(self)._adjoint == LinearOperator._adjoint:
            # Column-by-column fallback through rmatvec.
            return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T])
        else:
            return self.H.matmat(X)

    def __call__(self, x):
        return self*x

    def __mul__(self, x):
        return self.dot(x)

    def __truediv__(self, other):
        # Division is only defined against scalars (lazy rescaling).
        if not np.isscalar(other):
            raise ValueError("Can only divide a linear operator by a scalar.")

        return _ScaledLinearOperator(self, 1.0/other)

    def dot(self, x):
        """Matrix-matrix or matrix-vector multiplication.

        Parameters
        ----------
        x : array_like
            1-d or 2-d array, representing a vector or matrix.

        Returns
        -------
        Ax : array
            1-d or 2-d array (depending on the shape of x) that represents
            the result of applying this linear operator on x.

        """
        if isinstance(x, LinearOperator):
            # Operator * operator: compose lazily.
            return _ProductLinearOperator(self, x)
        elif np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            if not issparse(x) and not is_pydata_spmatrix(x):
                # Sparse matrices shouldn't be converted to numpy arrays.
                x = np.asarray(x)

            if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
                return self.matvec(x)
            elif x.ndim == 2:
                return self.matmat(x)
            else:
                raise ValueError('expected 1-d or 2-d array or matrix, got %r'
                                 % x)

    def __matmul__(self, other):
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__mul__(other)

    def __rmatmul__(self, other):
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__rmul__(other)

    def __rmul__(self, x):
        if np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            return self._rdot(x)

    def _rdot(self, x):
        """Matrix-matrix or matrix-vector multiplication from the right.

        Parameters
        ----------
        x : array_like
            1-d or 2-d array, representing a vector or matrix.

        Returns
        -------
        xA : array
            1-d or 2-d array (depending on the shape of x) that represents
            the result of applying this linear operator on x from the right.

        Notes
        -----
        This is copied from dot to implement right multiplication.
        """
        if isinstance(x, LinearOperator):
            return _ProductLinearOperator(x, self)
        elif np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            if not issparse(x) and not is_pydata_spmatrix(x):
                # Sparse matrices shouldn't be converted to numpy arrays.
                x = np.asarray(x)

            # We use transpose instead of rmatvec/rmatmat to avoid
            # unnecessary complex conjugation if possible.
            if x.ndim == 1 or x.ndim == 2 and x.shape[0] == 1:
                return self.T.matvec(x.T).T
            elif x.ndim == 2:
                return self.T.matmat(x.T).T
            else:
                raise ValueError('expected 1-d or 2-d array or matrix, got %r'
                                 % x)

    def __pow__(self, p):
        if np.isscalar(p):
            return _PowerLinearOperator(self, p)
        else:
            return NotImplemented

    def __add__(self, x):
        if isinstance(x, LinearOperator):
            return _SumLinearOperator(self, x)
        else:
            return NotImplemented

    def __neg__(self):
        return _ScaledLinearOperator(self, -1)

    def __sub__(self, x):
        return self.__add__(-x)

    def __repr__(self):
        M,N = self.shape
        if self.dtype is None:
            dt = 'unspecified dtype'
        else:
            dt = 'dtype=' + str(self.dtype)

        return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)

    def adjoint(self):
        """Hermitian adjoint.

        Returns the Hermitian adjoint of self, aka the Hermitian
        conjugate or Hermitian transpose. For a complex matrix, the
        Hermitian adjoint is equal to the conjugate transpose.

        Can be abbreviated self.H instead of self.adjoint().

        Returns
        -------
        A_H : LinearOperator
            Hermitian adjoint of self.
        """
        return self._adjoint()

    H = property(adjoint)

    def transpose(self):
        """Transpose this linear operator.

        Returns a LinearOperator that represents the transpose of this one.
        Can be abbreviated self.T instead of self.transpose().
        """
        return self._transpose()

    T = property(transpose)

    def _adjoint(self):
        """Default implementation of _adjoint; defers to rmatvec."""
        return _AdjointLinearOperator(self)

    def _transpose(self):
        """ Default implementation of _transpose; defers to rmatvec + conj"""
        return _TransposedLinearOperator(self)
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
class _CustomLinearOperator(LinearOperator):
|
| 571 |
+
"""Linear operator defined in terms of user-specified operations."""
|
| 572 |
+
|
| 573 |
+
def __init__(self, shape, matvec, rmatvec=None, matmat=None,
|
| 574 |
+
dtype=None, rmatmat=None):
|
| 575 |
+
super().__init__(dtype, shape)
|
| 576 |
+
|
| 577 |
+
self.args = ()
|
| 578 |
+
|
| 579 |
+
self.__matvec_impl = matvec
|
| 580 |
+
self.__rmatvec_impl = rmatvec
|
| 581 |
+
self.__rmatmat_impl = rmatmat
|
| 582 |
+
self.__matmat_impl = matmat
|
| 583 |
+
|
| 584 |
+
self._init_dtype()
|
| 585 |
+
|
| 586 |
+
def _matmat(self, X):
|
| 587 |
+
if self.__matmat_impl is not None:
|
| 588 |
+
return self.__matmat_impl(X)
|
| 589 |
+
else:
|
| 590 |
+
return super()._matmat(X)
|
| 591 |
+
|
| 592 |
+
def _matvec(self, x):
|
| 593 |
+
return self.__matvec_impl(x)
|
| 594 |
+
|
| 595 |
+
def _rmatvec(self, x):
|
| 596 |
+
func = self.__rmatvec_impl
|
| 597 |
+
if func is None:
|
| 598 |
+
raise NotImplementedError("rmatvec is not defined")
|
| 599 |
+
return self.__rmatvec_impl(x)
|
| 600 |
+
|
| 601 |
+
def _rmatmat(self, X):
|
| 602 |
+
if self.__rmatmat_impl is not None:
|
| 603 |
+
return self.__rmatmat_impl(X)
|
| 604 |
+
else:
|
| 605 |
+
return super()._rmatmat(X)
|
| 606 |
+
|
| 607 |
+
def _adjoint(self):
|
| 608 |
+
return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
|
| 609 |
+
matvec=self.__rmatvec_impl,
|
| 610 |
+
rmatvec=self.__matvec_impl,
|
| 611 |
+
matmat=self.__rmatmat_impl,
|
| 612 |
+
rmatmat=self.__matmat_impl,
|
| 613 |
+
dtype=self.dtype)
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
class _AdjointLinearOperator(LinearOperator):
|
| 617 |
+
"""Adjoint of arbitrary Linear Operator"""
|
| 618 |
+
|
| 619 |
+
def __init__(self, A):
|
| 620 |
+
shape = (A.shape[1], A.shape[0])
|
| 621 |
+
super().__init__(dtype=A.dtype, shape=shape)
|
| 622 |
+
self.A = A
|
| 623 |
+
self.args = (A,)
|
| 624 |
+
|
| 625 |
+
def _matvec(self, x):
|
| 626 |
+
return self.A._rmatvec(x)
|
| 627 |
+
|
| 628 |
+
def _rmatvec(self, x):
|
| 629 |
+
return self.A._matvec(x)
|
| 630 |
+
|
| 631 |
+
def _matmat(self, x):
|
| 632 |
+
return self.A._rmatmat(x)
|
| 633 |
+
|
| 634 |
+
def _rmatmat(self, x):
|
| 635 |
+
return self.A._matmat(x)
|
| 636 |
+
|
| 637 |
+
class _TransposedLinearOperator(LinearOperator):
|
| 638 |
+
"""Transposition of arbitrary Linear Operator"""
|
| 639 |
+
|
| 640 |
+
def __init__(self, A):
|
| 641 |
+
shape = (A.shape[1], A.shape[0])
|
| 642 |
+
super().__init__(dtype=A.dtype, shape=shape)
|
| 643 |
+
self.A = A
|
| 644 |
+
self.args = (A,)
|
| 645 |
+
|
| 646 |
+
def _matvec(self, x):
|
| 647 |
+
# NB. np.conj works also on sparse matrices
|
| 648 |
+
return np.conj(self.A._rmatvec(np.conj(x)))
|
| 649 |
+
|
| 650 |
+
def _rmatvec(self, x):
|
| 651 |
+
return np.conj(self.A._matvec(np.conj(x)))
|
| 652 |
+
|
| 653 |
+
def _matmat(self, x):
|
| 654 |
+
# NB. np.conj works also on sparse matrices
|
| 655 |
+
return np.conj(self.A._rmatmat(np.conj(x)))
|
| 656 |
+
|
| 657 |
+
def _rmatmat(self, x):
|
| 658 |
+
return np.conj(self.A._matmat(np.conj(x)))
|
| 659 |
+
|
| 660 |
+
def _get_dtype(operators, dtypes=None):
|
| 661 |
+
if dtypes is None:
|
| 662 |
+
dtypes = []
|
| 663 |
+
for obj in operators:
|
| 664 |
+
if obj is not None and hasattr(obj, 'dtype'):
|
| 665 |
+
dtypes.append(obj.dtype)
|
| 666 |
+
return np.result_type(*dtypes)
|
| 667 |
+
|
| 668 |
+
|
| 669 |
+
class _SumLinearOperator(LinearOperator):
|
| 670 |
+
def __init__(self, A, B):
|
| 671 |
+
if not isinstance(A, LinearOperator) or \
|
| 672 |
+
not isinstance(B, LinearOperator):
|
| 673 |
+
raise ValueError('both operands have to be a LinearOperator')
|
| 674 |
+
if A.shape != B.shape:
|
| 675 |
+
raise ValueError(f'cannot add {A} and {B}: shape mismatch')
|
| 676 |
+
self.args = (A, B)
|
| 677 |
+
super().__init__(_get_dtype([A, B]), A.shape)
|
| 678 |
+
|
| 679 |
+
def _matvec(self, x):
|
| 680 |
+
return self.args[0].matvec(x) + self.args[1].matvec(x)
|
| 681 |
+
|
| 682 |
+
def _rmatvec(self, x):
|
| 683 |
+
return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)
|
| 684 |
+
|
| 685 |
+
def _rmatmat(self, x):
|
| 686 |
+
return self.args[0].rmatmat(x) + self.args[1].rmatmat(x)
|
| 687 |
+
|
| 688 |
+
def _matmat(self, x):
|
| 689 |
+
return self.args[0].matmat(x) + self.args[1].matmat(x)
|
| 690 |
+
|
| 691 |
+
def _adjoint(self):
|
| 692 |
+
A, B = self.args
|
| 693 |
+
return A.H + B.H
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
class _ProductLinearOperator(LinearOperator):
|
| 697 |
+
def __init__(self, A, B):
|
| 698 |
+
if not isinstance(A, LinearOperator) or \
|
| 699 |
+
not isinstance(B, LinearOperator):
|
| 700 |
+
raise ValueError('both operands have to be a LinearOperator')
|
| 701 |
+
if A.shape[1] != B.shape[0]:
|
| 702 |
+
raise ValueError(f'cannot multiply {A} and {B}: shape mismatch')
|
| 703 |
+
super().__init__(_get_dtype([A, B]),
|
| 704 |
+
(A.shape[0], B.shape[1]))
|
| 705 |
+
self.args = (A, B)
|
| 706 |
+
|
| 707 |
+
def _matvec(self, x):
|
| 708 |
+
return self.args[0].matvec(self.args[1].matvec(x))
|
| 709 |
+
|
| 710 |
+
def _rmatvec(self, x):
|
| 711 |
+
return self.args[1].rmatvec(self.args[0].rmatvec(x))
|
| 712 |
+
|
| 713 |
+
def _rmatmat(self, x):
|
| 714 |
+
return self.args[1].rmatmat(self.args[0].rmatmat(x))
|
| 715 |
+
|
| 716 |
+
def _matmat(self, x):
|
| 717 |
+
return self.args[0].matmat(self.args[1].matmat(x))
|
| 718 |
+
|
| 719 |
+
def _adjoint(self):
|
| 720 |
+
A, B = self.args
|
| 721 |
+
return B.H * A.H
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
class _ScaledLinearOperator(LinearOperator):
|
| 725 |
+
def __init__(self, A, alpha):
|
| 726 |
+
if not isinstance(A, LinearOperator):
|
| 727 |
+
raise ValueError('LinearOperator expected as A')
|
| 728 |
+
if not np.isscalar(alpha):
|
| 729 |
+
raise ValueError('scalar expected as alpha')
|
| 730 |
+
if isinstance(A, _ScaledLinearOperator):
|
| 731 |
+
A, alpha_original = A.args
|
| 732 |
+
# Avoid in-place multiplication so that we don't accidentally mutate
|
| 733 |
+
# the original prefactor.
|
| 734 |
+
alpha = alpha * alpha_original
|
| 735 |
+
|
| 736 |
+
dtype = _get_dtype([A], [type(alpha)])
|
| 737 |
+
super().__init__(dtype, A.shape)
|
| 738 |
+
self.args = (A, alpha)
|
| 739 |
+
|
| 740 |
+
def _matvec(self, x):
|
| 741 |
+
return self.args[1] * self.args[0].matvec(x)
|
| 742 |
+
|
| 743 |
+
def _rmatvec(self, x):
|
| 744 |
+
return np.conj(self.args[1]) * self.args[0].rmatvec(x)
|
| 745 |
+
|
| 746 |
+
def _rmatmat(self, x):
|
| 747 |
+
return np.conj(self.args[1]) * self.args[0].rmatmat(x)
|
| 748 |
+
|
| 749 |
+
def _matmat(self, x):
|
| 750 |
+
return self.args[1] * self.args[0].matmat(x)
|
| 751 |
+
|
| 752 |
+
def _adjoint(self):
|
| 753 |
+
A, alpha = self.args
|
| 754 |
+
return A.H * np.conj(alpha)
|
| 755 |
+
|
| 756 |
+
|
| 757 |
+
class _PowerLinearOperator(LinearOperator):
|
| 758 |
+
def __init__(self, A, p):
|
| 759 |
+
if not isinstance(A, LinearOperator):
|
| 760 |
+
raise ValueError('LinearOperator expected as A')
|
| 761 |
+
if A.shape[0] != A.shape[1]:
|
| 762 |
+
raise ValueError('square LinearOperator expected, got %r' % A)
|
| 763 |
+
if not isintlike(p) or p < 0:
|
| 764 |
+
raise ValueError('non-negative integer expected as p')
|
| 765 |
+
|
| 766 |
+
super().__init__(_get_dtype([A]), A.shape)
|
| 767 |
+
self.args = (A, p)
|
| 768 |
+
|
| 769 |
+
def _power(self, fun, x):
|
| 770 |
+
res = np.array(x, copy=True)
|
| 771 |
+
for i in range(self.args[1]):
|
| 772 |
+
res = fun(res)
|
| 773 |
+
return res
|
| 774 |
+
|
| 775 |
+
def _matvec(self, x):
|
| 776 |
+
return self._power(self.args[0].matvec, x)
|
| 777 |
+
|
| 778 |
+
def _rmatvec(self, x):
|
| 779 |
+
return self._power(self.args[0].rmatvec, x)
|
| 780 |
+
|
| 781 |
+
def _rmatmat(self, x):
|
| 782 |
+
return self._power(self.args[0].rmatmat, x)
|
| 783 |
+
|
| 784 |
+
def _matmat(self, x):
|
| 785 |
+
return self._power(self.args[0].matmat, x)
|
| 786 |
+
|
| 787 |
+
def _adjoint(self):
|
| 788 |
+
A, p = self.args
|
| 789 |
+
return A.H ** p
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
class MatrixLinearOperator(LinearOperator):
    """LinearOperator view of an explicitly stored (dense or sparse) matrix."""

    def __init__(self, A):
        super().__init__(A.dtype, A.shape)
        self.A = A
        # The adjoint operator is constructed lazily and then cached.
        self.__adj = None
        self.args = (A,)

    def _matmat(self, X):
        # Delegate the product directly to the wrapped matrix.
        return self.A.dot(X)

    def _adjoint(self):
        if self.__adj is None:
            self.__adj = _AdjointMatrixOperator(self)
        return self.__adj
|
| 806 |
+
|
| 807 |
+
class _AdjointMatrixOperator(MatrixLinearOperator):
    # Hermitian adjoint of a MatrixLinearOperator.
    #
    # NOTE: this deliberately does NOT call super().__init__() -- ``dtype``
    # is exposed as a read-only property below, so the base-class
    # assignment ``self.dtype = dtype`` would fail.
    def __init__(self, adjoint):
        # Materialize the conjugate transpose of the parent's matrix.
        self.A = adjoint.A.T.conj()
        self.__adjoint = adjoint
        self.args = (adjoint,)
        self.shape = adjoint.shape[1], adjoint.shape[0]

    @property
    def dtype(self):
        # Mirror the parent operator's dtype instead of storing a copy.
        return self.__adjoint.dtype

    def _adjoint(self):
        # The adjoint of the adjoint is the original operator.
        return self.__adjoint
|
| 820 |
+
|
| 821 |
+
|
| 822 |
+
class IdentityOperator(LinearOperator):
    """Operator that maps every input to itself (the identity map)."""

    def __init__(self, shape, dtype=None):
        super().__init__(dtype, shape)

    def _matvec(self, x):
        return x

    def _rmatvec(self, x):
        return x

    def _rmatmat(self, x):
        return x

    def _matmat(self, x):
        return x

    def _adjoint(self):
        # The identity is self-adjoint.
        return self
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
def aslinearoperator(A):
    """Return A as a LinearOperator.

    'A' may be any of the following types:
     - ndarray
     - matrix
     - sparse matrix (e.g. csr_matrix, lil_matrix, etc.)
     - LinearOperator
     - An object with .shape and .matvec attributes

    See the LinearOperator documentation for additional information.

    Notes
    -----
    If 'A' has no .dtype attribute, the data type is determined by calling
    :func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this
    call upon the linear operator creation.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import aslinearoperator
    >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
    >>> aslinearoperator(M)
    <2x3 MatrixLinearOperator with dtype=int32>
    """
    # Already an operator: return it unchanged.
    if isinstance(A, LinearOperator):
        return A

    # Dense arrays and np.matrix instances wrap the (2-d) data directly.
    if isinstance(A, (np.ndarray, np.matrix)):
        if A.ndim > 2:
            raise ValueError('array must have ndim <= 2')
        return MatrixLinearOperator(np.atleast_2d(np.asarray(A)))

    if issparse(A) or is_pydata_spmatrix(A):
        return MatrixLinearOperator(A)

    # Duck-typed objects: anything exposing .shape and .matvec works;
    # rmatvec/rmatmat/dtype are picked up when present.
    if hasattr(A, 'shape') and hasattr(A, 'matvec'):
        rmatvec = getattr(A, 'rmatvec', None)
        rmatmat = getattr(A, 'rmatmat', None)
        dtype = getattr(A, 'dtype', None)
        return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
                              rmatmat=rmatmat, dtype=dtype)

    raise TypeError('type not understood')
|
.venv/Lib/site-packages/scipy/sparse/linalg/_isolve/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"Iterative Solvers for Sparse Linear Systems"
|
| 2 |
+
|
| 3 |
+
#from info import __doc__
|
| 4 |
+
from .iterative import *
|
| 5 |
+
from .minres import minres
|
| 6 |
+
from .lgmres import lgmres
|
| 7 |
+
from .lsqr import lsqr
|
| 8 |
+
from .lsmr import lsmr
|
| 9 |
+
from ._gcrotmk import gcrotmk
|
| 10 |
+
from .tfqmr import tfqmr
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres',
|
| 14 |
+
'lgmres', 'lsmr', 'lsqr',
|
| 15 |
+
'minres', 'qmr', 'tfqmr'
|
| 16 |
+
]
|
| 17 |
+
|
| 18 |
+
from scipy._lib._testutils import PytestTester
|
| 19 |
+
test = PytestTester(__name__)
|
| 20 |
+
del PytestTester
|
.venv/Lib/site-packages/scipy/sparse/linalg/_isolve/_gcrotmk.py
ADDED
|
@@ -0,0 +1,514 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2015, Pauli Virtanen <pav@iki.fi>
|
| 2 |
+
# Distributed under the same license as SciPy.
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.linalg import LinAlgError
|
| 6 |
+
from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
|
| 7 |
+
from .iterative import _get_atol_rtol
|
| 8 |
+
from scipy.sparse.linalg._isolve.utils import make_system
|
| 9 |
+
from scipy._lib.deprecation import _NoValue, _deprecate_positional_args
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
__all__ = ['gcrotmk']
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
|
| 16 |
+
prepend_outer_v=False):
|
| 17 |
+
"""
|
| 18 |
+
FGMRES Arnoldi process, with optional projection or augmentation
|
| 19 |
+
|
| 20 |
+
Parameters
|
| 21 |
+
----------
|
| 22 |
+
matvec : callable
|
| 23 |
+
Operation A*x
|
| 24 |
+
v0 : ndarray
|
| 25 |
+
Initial vector, normalized to nrm2(v0) == 1
|
| 26 |
+
m : int
|
| 27 |
+
Number of GMRES rounds
|
| 28 |
+
atol : float
|
| 29 |
+
Absolute tolerance for early exit
|
| 30 |
+
lpsolve : callable
|
| 31 |
+
Left preconditioner L
|
| 32 |
+
rpsolve : callable
|
| 33 |
+
Right preconditioner R
|
| 34 |
+
cs : list of (ndarray, ndarray)
|
| 35 |
+
Columns of matrices C and U in GCROT
|
| 36 |
+
outer_v : list of ndarrays
|
| 37 |
+
Augmentation vectors in LGMRES
|
| 38 |
+
prepend_outer_v : bool, optional
|
| 39 |
+
Whether augmentation vectors come before or after
|
| 40 |
+
Krylov iterates
|
| 41 |
+
|
| 42 |
+
Raises
|
| 43 |
+
------
|
| 44 |
+
LinAlgError
|
| 45 |
+
If nans encountered
|
| 46 |
+
|
| 47 |
+
Returns
|
| 48 |
+
-------
|
| 49 |
+
Q, R : ndarray
|
| 50 |
+
QR decomposition of the upper Hessenberg H=QR
|
| 51 |
+
B : ndarray
|
| 52 |
+
Projections corresponding to matrix C
|
| 53 |
+
vs : list of ndarray
|
| 54 |
+
Columns of matrix V
|
| 55 |
+
zs : list of ndarray
|
| 56 |
+
Columns of matrix Z
|
| 57 |
+
y : ndarray
|
| 58 |
+
Solution to ||H y - e_1||_2 = min!
|
| 59 |
+
res : float
|
| 60 |
+
The final (preconditioned) residual norm
|
| 61 |
+
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
if lpsolve is None:
|
| 65 |
+
def lpsolve(x):
|
| 66 |
+
return x
|
| 67 |
+
if rpsolve is None:
|
| 68 |
+
def rpsolve(x):
|
| 69 |
+
return x
|
| 70 |
+
|
| 71 |
+
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
|
| 72 |
+
|
| 73 |
+
vs = [v0]
|
| 74 |
+
zs = []
|
| 75 |
+
y = None
|
| 76 |
+
res = np.nan
|
| 77 |
+
|
| 78 |
+
m = m + len(outer_v)
|
| 79 |
+
|
| 80 |
+
# Orthogonal projection coefficients
|
| 81 |
+
B = np.zeros((len(cs), m), dtype=v0.dtype)
|
| 82 |
+
|
| 83 |
+
# H is stored in QR factorized form
|
| 84 |
+
Q = np.ones((1, 1), dtype=v0.dtype)
|
| 85 |
+
R = np.zeros((1, 0), dtype=v0.dtype)
|
| 86 |
+
|
| 87 |
+
eps = np.finfo(v0.dtype).eps
|
| 88 |
+
|
| 89 |
+
breakdown = False
|
| 90 |
+
|
| 91 |
+
# FGMRES Arnoldi process
|
| 92 |
+
for j in range(m):
|
| 93 |
+
# L A Z = C B + V H
|
| 94 |
+
|
| 95 |
+
if prepend_outer_v and j < len(outer_v):
|
| 96 |
+
z, w = outer_v[j]
|
| 97 |
+
elif prepend_outer_v and j == len(outer_v):
|
| 98 |
+
z = rpsolve(v0)
|
| 99 |
+
w = None
|
| 100 |
+
elif not prepend_outer_v and j >= m - len(outer_v):
|
| 101 |
+
z, w = outer_v[j - (m - len(outer_v))]
|
| 102 |
+
else:
|
| 103 |
+
z = rpsolve(vs[-1])
|
| 104 |
+
w = None
|
| 105 |
+
|
| 106 |
+
if w is None:
|
| 107 |
+
w = lpsolve(matvec(z))
|
| 108 |
+
else:
|
| 109 |
+
# w is clobbered below
|
| 110 |
+
w = w.copy()
|
| 111 |
+
|
| 112 |
+
w_norm = nrm2(w)
|
| 113 |
+
|
| 114 |
+
# GCROT projection: L A -> (1 - C C^H) L A
|
| 115 |
+
# i.e. orthogonalize against C
|
| 116 |
+
for i, c in enumerate(cs):
|
| 117 |
+
alpha = dot(c, w)
|
| 118 |
+
B[i,j] = alpha
|
| 119 |
+
w = axpy(c, w, c.shape[0], -alpha) # w -= alpha*c
|
| 120 |
+
|
| 121 |
+
# Orthogonalize against V
|
| 122 |
+
hcur = np.zeros(j+2, dtype=Q.dtype)
|
| 123 |
+
for i, v in enumerate(vs):
|
| 124 |
+
alpha = dot(v, w)
|
| 125 |
+
hcur[i] = alpha
|
| 126 |
+
w = axpy(v, w, v.shape[0], -alpha) # w -= alpha*v
|
| 127 |
+
hcur[i+1] = nrm2(w)
|
| 128 |
+
|
| 129 |
+
with np.errstate(over='ignore', divide='ignore'):
|
| 130 |
+
# Careful with denormals
|
| 131 |
+
alpha = 1/hcur[-1]
|
| 132 |
+
|
| 133 |
+
if np.isfinite(alpha):
|
| 134 |
+
w = scal(alpha, w)
|
| 135 |
+
|
| 136 |
+
if not (hcur[-1] > eps * w_norm):
|
| 137 |
+
# w essentially in the span of previous vectors,
|
| 138 |
+
# or we have nans. Bail out after updating the QR
|
| 139 |
+
# solution.
|
| 140 |
+
breakdown = True
|
| 141 |
+
|
| 142 |
+
vs.append(w)
|
| 143 |
+
zs.append(z)
|
| 144 |
+
|
| 145 |
+
# Arnoldi LSQ problem
|
| 146 |
+
|
| 147 |
+
# Add new column to H=Q@R, padding other columns with zeros
|
| 148 |
+
Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
|
| 149 |
+
Q2[:j+1,:j+1] = Q
|
| 150 |
+
Q2[j+1,j+1] = 1
|
| 151 |
+
|
| 152 |
+
R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
|
| 153 |
+
R2[:j+1,:] = R
|
| 154 |
+
|
| 155 |
+
Q, R = qr_insert(Q2, R2, hcur, j, which='col',
|
| 156 |
+
overwrite_qru=True, check_finite=False)
|
| 157 |
+
|
| 158 |
+
# Transformed least squares problem
|
| 159 |
+
# || Q R y - inner_res_0 * e_1 ||_2 = min!
|
| 160 |
+
# Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
|
| 161 |
+
|
| 162 |
+
# Residual is immediately known
|
| 163 |
+
res = abs(Q[0,-1])
|
| 164 |
+
|
| 165 |
+
# Check for termination
|
| 166 |
+
if res < atol or breakdown:
|
| 167 |
+
break
|
| 168 |
+
|
| 169 |
+
if not np.isfinite(R[j,j]):
|
| 170 |
+
# nans encountered, bail out
|
| 171 |
+
raise LinAlgError()
|
| 172 |
+
|
| 173 |
+
# -- Get the LSQ problem solution
|
| 174 |
+
|
| 175 |
+
# The problem is triangular, but the condition number may be
|
| 176 |
+
# bad (or in case of breakdown the last diagonal entry may be
|
| 177 |
+
# zero), so use lstsq instead of trtrs.
|
| 178 |
+
y, _, _, _, = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())
|
| 179 |
+
|
| 180 |
+
B = B[:,:j+1]
|
| 181 |
+
|
| 182 |
+
return Q, R, B, vs, zs, y, res
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
@_deprecate_positional_args(version="1.14.0")
def gcrotmk(A, b, x0=None, *, tol=_NoValue, maxiter=1000, M=None, callback=None,
            m=20, k=None, CU=None, discard_C=False, truncate='oldest',
            atol=None, rtol=1e-5):
    """
    Solve a matrix equation using flexible GCROT(m,k) algorithm.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution.
    rtol, atol : float, optional
        Parameters for the convergence test. For convergence,
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
        The default is ``rtol=1e-5``, the default for ``atol`` is ``rtol``.

        .. warning::

           The default value for ``atol`` will be changed to ``0.0`` in
           SciPy 1.14.0.
    maxiter : int, optional
        Maximum number of iterations. Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}, optional
        Preconditioner for A. The preconditioner should approximate the
        inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
        can vary from iteration to iteration. Effective preconditioning
        dramatically improves the rate of convergence, which implies that
        fewer iterations are needed to reach a given error tolerance.
    callback : function, optional
        User-supplied function to call after each iteration. It is called
        as callback(xk), where xk is the current solution vector.
    m : int, optional
        Number of inner FGMRES iterations per each outer iteration.
        Default: 20
    k : int, optional
        Number of vectors to carry between inner FGMRES iterations.
        According to [2]_, good values are around m.
        Default: m
    CU : list of tuples, optional
        List of tuples ``(c, u)`` which contain the columns of the matrices
        C and U in the GCROT(m,k) algorithm. For details, see [2]_.
        The list given and vectors contained in it are modified in-place.
        If not given, start from empty matrices. The ``c`` elements in the
        tuples can be ``None``, in which case the vectors are recomputed
        via ``c = A u`` on start and orthogonalized as described in [3]_.
    discard_C : bool, optional
        Discard the C-vectors at the end. Useful if recycling Krylov subspaces
        for different linear systems.
    truncate : {'oldest', 'smallest'}, optional
        Truncation scheme to use. Drop: oldest vectors, or vectors with
        smallest singular values using the scheme discussed in [1,2].
        See [2]_ for detailed comparison.
        Default: 'oldest'
    tol : float, optional, deprecated

        .. deprecated:: 1.12.0
           `gcrotmk` keyword argument ``tol`` is deprecated in favor of
           ``rtol`` and will be removed in SciPy 1.14.0.

    Returns
    -------
    x : ndarray
        The solution found.
    info : int
        Provides convergence information:

        * 0  : successful exit
        * >0 : convergence to tolerance not achieved, number of iterations

    References
    ----------
    .. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
           methods'', SIAM J. Numer. Anal. 36, 864 (1999).
    .. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
           of GCROT for solving nonsymmetric linear systems'',
           SIAM J. Sci. Comput. 32, 172 (2010).
    .. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
           ''Recycling Krylov subspaces for sequences of linear systems'',
           SIAM J. Sci. Comput. 28, 1651 (2006).

    """
    A,M,x,b,postprocess = make_system(A,M,x0,b)

    if not np.isfinite(b).all():
        raise ValueError("RHS must contain only finite numbers")

    if truncate not in ('oldest', 'smallest'):
        raise ValueError(f"Invalid value for 'truncate': {truncate!r}")

    matvec = A.matvec
    psolve = M.matvec

    if CU is None:
        CU = []

    if k is None:
        k = m

    axpy, dot, scal = None, None, None

    # Initial residual; avoid the matvec when the initial guess is zero.
    if x0 is None:
        r = b.copy()
    else:
        r = b - matvec(x)

    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))

    b_norm = nrm2(b)

    # we call this to get the right atol/rtol and raise warnings as necessary
    atol, rtol = _get_atol_rtol('gcrotmk', b_norm, tol, atol, rtol)

    if b_norm == 0:
        x = b
        return (postprocess(x), 0)

    if discard_C:
        CU[:] = [(None, u) for c, u in CU]

    # Reorthogonalize old vectors
    if CU:
        # Sort already existing vectors to the front
        CU.sort(key=lambda cu: cu[0] is not None)

        # Fill-in missing ones
        C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
        us = []
        j = 0
        while CU:
            # More memory-efficient: throw away old vectors as we go
            c, u = CU.pop(0)
            if c is None:
                c = matvec(u)
            C[:,j] = c
            j += 1
            us.append(u)

        # Orthogonalize
        Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
        del C

        # C := Q
        cs = list(Q.T)

        # U := U P R^-1, back-substitution
        new_us = []
        for j in range(len(cs)):
            u = us[P[j]]
            for i in range(j):
                u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
            if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
                # discard rest of the vectors
                break
            u = scal(1.0/R[j,j], u)
            new_us.append(u)

        # Form the new CU lists
        CU[:] = list(zip(cs, new_us))[::-1]

    if CU:
        axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))

        # Solve first the projection operation with respect to the CU
        # vectors. This corresponds to modifying the initial guess to
        # be
        #
        #     x' = x + U y
        #     y = argmin_y || b - A (x + U y) ||^2
        #
        # The solution is y = C^H (b - A x)
        for c, u in CU:
            yc = dot(c, r)
            x = axpy(u, x, x.shape[0], yc)
            r = axpy(c, r, r.shape[0], -yc)

    # GCROT main iteration
    for j_outer in range(maxiter):
        # -- callback
        if callback is not None:
            callback(x)

        beta = nrm2(r)

        # -- check stopping condition
        beta_tol = max(atol, rtol * b_norm)

        if beta <= beta_tol and (j_outer > 0 or CU):
            # recompute residual to avoid rounding error
            r = b - matvec(x)
            beta = nrm2(r)

        if beta <= beta_tol:
            # j_outer = -1 marks convergence; return value is j_outer + 1 == 0
            j_outer = -1
            break

        ml = m + max(k - len(CU), 0)

        cs = [c for c, u in CU]

        try:
            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
                                               r/beta,
                                               ml,
                                               rpsolve=psolve,
                                               atol=max(atol, rtol*b_norm)/beta,
                                               cs=cs)
            y *= beta
        except LinAlgError:
            # Floating point over/underflow, non-finite result from
            # matmul etc. -- report failure.
            break

        #
        # At this point,
        #
        #     [A U, A Z] = [C, V] G;   G =  [ I  B ]
        #                                   [ 0  H ]
        #
        # where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
        #
        #     || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
        #
        # from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
        #

        #
        # GCROT(m,k) update
        #

        # Define new outer vectors

        # ux := (Z - U B) y
        ux = zs[0]*y[0]
        for z, yc in zip(zs[1:], y[1:]):
            ux = axpy(z, ux, ux.shape[0], yc)  # ux += z*yc
        by = B.dot(y)
        for cu, byc in zip(CU, by):
            c, u = cu
            ux = axpy(u, ux, ux.shape[0], -byc)  # ux -= u*byc

        # cx := V H y
        hy = Q.dot(R.dot(y))
        cx = vs[0] * hy[0]
        for v, hyc in zip(vs[1:], hy[1:]):
            cx = axpy(v, cx, cx.shape[0], hyc)  # cx += v*hyc

        # Normalize cx, maintaining cx = A ux
        # This new cx is orthogonal to the previous C, by construction
        try:
            alpha = 1/nrm2(cx)
            if not np.isfinite(alpha):
                raise FloatingPointError()
        except (FloatingPointError, ZeroDivisionError):
            # Cannot update, so skip it
            continue

        cx = scal(alpha, cx)
        ux = scal(alpha, ux)

        # Update residual and solution
        gamma = dot(cx, r)
        r = axpy(cx, r, r.shape[0], -gamma)  # r -= gamma*cx
        x = axpy(ux, x, x.shape[0], gamma)  # x += gamma*ux

        # Truncate CU
        if truncate == 'oldest':
            while len(CU) >= k and CU:
                del CU[0]
        elif truncate == 'smallest':
            if len(CU) >= k and CU:
                # cf. [1,2]
                D = solve(R[:-1,:].T, B.T).T
                W, sigma, V = svd(D)

                # C := C W[:,:k-1],  U := U W[:,:k-1]
                new_CU = []
                for j, w in enumerate(W[:,:k-1].T):
                    c, u = CU[0]
                    c = c * w[0]
                    u = u * w[0]
                    for cup, wp in zip(CU[1:], w[1:]):
                        cp, up = cup
                        c = axpy(cp, c, c.shape[0], wp)
                        u = axpy(up, u, u.shape[0], wp)

                    # Reorthogonalize at the same time; not necessary
                    # in exact arithmetic, but floating point error
                    # tends to accumulate here
                    for cp, up in new_CU:
                        alpha = dot(cp, c)
                        c = axpy(cp, c, c.shape[0], -alpha)
                        u = axpy(up, u, u.shape[0], -alpha)
                    alpha = nrm2(c)
                    c = scal(1.0/alpha, c)
                    u = scal(1.0/alpha, u)

                    new_CU.append((c, u))
                CU[:] = new_CU

        # Add new vector to CU
        CU.append((cx, ux))

    # Include the solution vector to the span
    CU.append((None, x.copy()))
    if discard_C:
        CU[:] = [(None, uz) for cz, uz in CU]

    return postprocess(x), j_outer + 1
|
.venv/Lib/site-packages/scipy/sparse/linalg/_isolve/iterative.py
ADDED
|
@@ -0,0 +1,1079 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
import numpy as np
|
| 3 |
+
from scipy.sparse.linalg._interface import LinearOperator
|
| 4 |
+
from .utils import make_system
|
| 5 |
+
from scipy.linalg import get_lapack_funcs
|
| 6 |
+
from scipy._lib.deprecation import _NoValue, _deprecate_positional_args
|
| 7 |
+
|
| 8 |
+
__all__ = ['bicg', 'bicgstab', 'cg', 'cgs', 'gmres', 'qmr']
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _get_atol_rtol(name, b_norm, tol=_NoValue, atol=0., rtol=1e-5):
|
| 12 |
+
"""
|
| 13 |
+
A helper function to handle tolerance deprecations and normalization
|
| 14 |
+
"""
|
| 15 |
+
if tol is not _NoValue:
|
| 16 |
+
msg = (f"'scipy.sparse.linalg.{name}' keyword argument `tol` is "
|
| 17 |
+
"deprecated in favor of `rtol` and will be removed in SciPy "
|
| 18 |
+
"v1.14.0. Until then, if set, it will override `rtol`.")
|
| 19 |
+
warnings.warn(msg, category=DeprecationWarning, stacklevel=4)
|
| 20 |
+
rtol = float(tol) if tol is not None else rtol
|
| 21 |
+
|
| 22 |
+
if atol == 'legacy':
|
| 23 |
+
msg = (f"'scipy.sparse.linalg.{name}' called with `atol='legacy'`. "
|
| 24 |
+
"This behavior is deprecated and will result in an error in "
|
| 25 |
+
"SciPy v1.14.0. To preserve current behaviour, set `atol=0.0`.")
|
| 26 |
+
warnings.warn(msg, category=DeprecationWarning, stacklevel=4)
|
| 27 |
+
atol = 0
|
| 28 |
+
|
| 29 |
+
# this branch is only hit from gcrotmk/lgmres/tfqmr
|
| 30 |
+
if atol is None:
|
| 31 |
+
msg = (f"'scipy.sparse.linalg.{name}' called without specifying "
|
| 32 |
+
"`atol`. This behavior is deprecated and will result in an "
|
| 33 |
+
"error in SciPy v1.14.0. To preserve current behaviour, set "
|
| 34 |
+
"`atol=rtol`, or, to adopt the future default, set `atol=0.0`.")
|
| 35 |
+
warnings.warn(msg, category=DeprecationWarning, stacklevel=4)
|
| 36 |
+
atol = rtol
|
| 37 |
+
|
| 38 |
+
atol = max(float(atol), float(rtol) * float(b_norm))
|
| 39 |
+
|
| 40 |
+
return atol, rtol
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
@_deprecate_positional_args(version="1.14")
|
| 44 |
+
def bicg(A, b, x0=None, *, tol=_NoValue, maxiter=None, M=None, callback=None,
|
| 45 |
+
atol=0., rtol=1e-5):
|
| 46 |
+
"""Use BIConjugate Gradient iteration to solve ``Ax = b``.
|
| 47 |
+
|
| 48 |
+
Parameters
|
| 49 |
+
----------
|
| 50 |
+
A : {sparse matrix, ndarray, LinearOperator}
|
| 51 |
+
The real or complex N-by-N matrix of the linear system.
|
| 52 |
+
Alternatively, ``A`` can be a linear operator which can
|
| 53 |
+
produce ``Ax`` and ``A^T x`` using, e.g.,
|
| 54 |
+
``scipy.sparse.linalg.LinearOperator``.
|
| 55 |
+
b : ndarray
|
| 56 |
+
Right hand side of the linear system. Has shape (N,) or (N,1).
|
| 57 |
+
x0 : ndarray
|
| 58 |
+
Starting guess for the solution.
|
| 59 |
+
rtol, atol : float, optional
|
| 60 |
+
Parameters for the convergence test. For convergence,
|
| 61 |
+
``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
|
| 62 |
+
The default is ``atol=0.`` and ``rtol=1e-5``.
|
| 63 |
+
maxiter : integer
|
| 64 |
+
Maximum number of iterations. Iteration will stop after maxiter
|
| 65 |
+
steps even if the specified tolerance has not been achieved.
|
| 66 |
+
M : {sparse matrix, ndarray, LinearOperator}
|
| 67 |
+
Preconditioner for A. The preconditioner should approximate the
|
| 68 |
+
inverse of A. Effective preconditioning dramatically improves the
|
| 69 |
+
rate of convergence, which implies that fewer iterations are needed
|
| 70 |
+
to reach a given error tolerance.
|
| 71 |
+
callback : function
|
| 72 |
+
User-supplied function to call after each iteration. It is called
|
| 73 |
+
as callback(xk), where xk is the current solution vector.
|
| 74 |
+
tol : float, optional, deprecated
|
| 75 |
+
|
| 76 |
+
.. deprecated:: 1.12.0
|
| 77 |
+
`bicg` keyword argument ``tol`` is deprecated in favor of ``rtol``
|
| 78 |
+
and will be removed in SciPy 1.14.0.
|
| 79 |
+
|
| 80 |
+
Returns
|
| 81 |
+
-------
|
| 82 |
+
x : ndarray
|
| 83 |
+
The converged solution.
|
| 84 |
+
info : integer
|
| 85 |
+
Provides convergence information:
|
| 86 |
+
0 : successful exit
|
| 87 |
+
>0 : convergence to tolerance not achieved, number of iterations
|
| 88 |
+
<0 : parameter breakdown
|
| 89 |
+
|
| 90 |
+
Examples
|
| 91 |
+
--------
|
| 92 |
+
>>> import numpy as np
|
| 93 |
+
>>> from scipy.sparse import csc_matrix
|
| 94 |
+
>>> from scipy.sparse.linalg import bicg
|
| 95 |
+
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1.]])
|
| 96 |
+
>>> b = np.array([2., 4., -1.])
|
| 97 |
+
>>> x, exitCode = bicg(A, b, atol=1e-5)
|
| 98 |
+
>>> print(exitCode) # 0 indicates successful convergence
|
| 99 |
+
0
|
| 100 |
+
>>> np.allclose(A.dot(x), b)
|
| 101 |
+
True
|
| 102 |
+
|
| 103 |
+
"""
|
| 104 |
+
A, M, x, b, postprocess = make_system(A, M, x0, b)
|
| 105 |
+
bnrm2 = np.linalg.norm(b)
|
| 106 |
+
|
| 107 |
+
atol, _ = _get_atol_rtol('bicg', bnrm2, tol, atol, rtol)
|
| 108 |
+
|
| 109 |
+
if bnrm2 == 0:
|
| 110 |
+
return postprocess(b), 0
|
| 111 |
+
|
| 112 |
+
n = len(b)
|
| 113 |
+
dotprod = np.vdot if np.iscomplexobj(x) else np.dot
|
| 114 |
+
|
| 115 |
+
if maxiter is None:
|
| 116 |
+
maxiter = n*10
|
| 117 |
+
|
| 118 |
+
matvec, rmatvec = A.matvec, A.rmatvec
|
| 119 |
+
psolve, rpsolve = M.matvec, M.rmatvec
|
| 120 |
+
|
| 121 |
+
rhotol = np.finfo(x.dtype.char).eps**2
|
| 122 |
+
|
| 123 |
+
# Dummy values to initialize vars, silence linter warnings
|
| 124 |
+
rho_prev, p, ptilde = None, None, None
|
| 125 |
+
|
| 126 |
+
r = b - matvec(x) if x.any() else b.copy()
|
| 127 |
+
rtilde = r.copy()
|
| 128 |
+
|
| 129 |
+
for iteration in range(maxiter):
|
| 130 |
+
if np.linalg.norm(r) < atol: # Are we done?
|
| 131 |
+
return postprocess(x), 0
|
| 132 |
+
|
| 133 |
+
z = psolve(r)
|
| 134 |
+
ztilde = rpsolve(rtilde)
|
| 135 |
+
# order matters in this dot product
|
| 136 |
+
rho_cur = dotprod(rtilde, z)
|
| 137 |
+
|
| 138 |
+
if np.abs(rho_cur) < rhotol: # Breakdown case
|
| 139 |
+
return postprocess, -10
|
| 140 |
+
|
| 141 |
+
if iteration > 0:
|
| 142 |
+
beta = rho_cur / rho_prev
|
| 143 |
+
p *= beta
|
| 144 |
+
p += z
|
| 145 |
+
ptilde *= beta.conj()
|
| 146 |
+
ptilde += ztilde
|
| 147 |
+
else: # First spin
|
| 148 |
+
p = z.copy()
|
| 149 |
+
ptilde = ztilde.copy()
|
| 150 |
+
|
| 151 |
+
q = matvec(p)
|
| 152 |
+
qtilde = rmatvec(ptilde)
|
| 153 |
+
rv = dotprod(ptilde, q)
|
| 154 |
+
|
| 155 |
+
if rv == 0:
|
| 156 |
+
return postprocess(x), -11
|
| 157 |
+
|
| 158 |
+
alpha = rho_cur / rv
|
| 159 |
+
x += alpha*p
|
| 160 |
+
r -= alpha*q
|
| 161 |
+
rtilde -= alpha.conj()*qtilde
|
| 162 |
+
rho_prev = rho_cur
|
| 163 |
+
|
| 164 |
+
if callback:
|
| 165 |
+
callback(x)
|
| 166 |
+
|
| 167 |
+
else: # for loop exhausted
|
| 168 |
+
# Return incomplete progress
|
| 169 |
+
return postprocess(x), maxiter
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
@_deprecate_positional_args(version="1.14")
def bicgstab(A, b, *, x0=None, tol=_NoValue, maxiter=None, M=None,
             callback=None, atol=0., rtol=1e-5):
    """Use BIConjugate Gradient STABilized iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^T x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution.
    rtol, atol : float, optional
        Parameters for the convergence test. For convergence,
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
        The default is ``atol=0.`` and ``rtol=1e-5``.
    maxiter : integer
        Maximum number of iterations. Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}
        Preconditioner for A. The preconditioner should approximate the
        inverse of A. Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function
        User-supplied function to call after each iteration. It is called
        as callback(xk), where xk is the current solution vector.
    tol : float, optional, deprecated

        .. deprecated:: 1.12.0
           `bicgstab` keyword argument ``tol`` is deprecated in favor of
           ``rtol`` and will be removed in SciPy 1.14.0.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : parameter breakdown

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import bicgstab
    >>> R = np.array([[4, 2, 0, 1],
    ...               [3, 0, 0, 2],
    ...               [0, 1, 1, 1],
    ...               [0, 2, 1, 0]])
    >>> A = csc_matrix(R)
    >>> b = np.array([-1, -0.5, -1, 2])
    >>> x, exit_code = bicgstab(A, b, atol=1e-5)
    >>> print(exit_code)  # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True

    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)
    bnrm2 = np.linalg.norm(b)

    atol, _ = _get_atol_rtol('bicgstab', bnrm2, tol, atol, rtol)

    # b == 0: x = 0 is the exact solution.
    if bnrm2 == 0:
        return postprocess(b), 0

    n = len(b)

    # Conjugated inner product for complex-valued systems.
    dotprod = np.vdot if np.iscomplexobj(x) else np.dot

    if maxiter is None:
        maxiter = n*10

    matvec = A.matvec
    psolve = M.matvec

    # These values make no sense but coming from original Fortran code
    # sqrt might have been meant instead.
    rhotol = np.finfo(x.dtype.char).eps**2
    omegatol = rhotol

    # Dummy values to initialize vars, silence linter warnings
    rho_prev, omega, alpha, p, v = None, None, None, None, None

    # Initial residual; skip the matvec when the initial guess is all zeros.
    r = b - matvec(x) if x.any() else b.copy()
    rtilde = r.copy()

    for iteration in range(maxiter):
        if np.linalg.norm(r) < atol:  # Are we done?
            return postprocess(x), 0

        rho = dotprod(rtilde, r)
        if np.abs(rho) < rhotol:  # rho breakdown
            return postprocess(x), -10

        if iteration > 0:
            if np.abs(omega) < omegatol:  # omega breakdown
                return postprocess(x), -11

            # In-place computation of p = r + beta*(p - omega*v)
            beta = (rho / rho_prev) * (alpha / omega)
            p -= omega*v
            p *= beta
            p += r
        else:  # First spin
            # ``s`` is allocated once here and reused (overwritten) on
            # every subsequent iteration.
            s = np.empty_like(r)
            p = r.copy()

        phat = psolve(p)
        v = matvec(phat)
        rv = dotprod(rtilde, v)
        if rv == 0:
            return postprocess(x), -11
        alpha = rho / rv
        r -= alpha*v
        s[:] = r[:]

        # Early exit on the half-step residual ``s``.
        if np.linalg.norm(s) < atol:
            x += alpha*phat
            return postprocess(x), 0

        shat = psolve(s)
        t = matvec(shat)
        # Stabilization parameter: least-squares minimizer of ||s - omega*t||.
        omega = dotprod(t, s) / dotprod(t, t)
        x += alpha*phat
        x += omega*shat
        r -= omega*t
        rho_prev = rho

        if callback:
            callback(x)

    else:  # for loop exhausted
        # Return incomplete progress
        return postprocess(x), maxiter
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
@_deprecate_positional_args(version="1.14")
|
| 316 |
+
def cg(A, b, x0=None, *, tol=_NoValue, maxiter=None, M=None, callback=None,
|
| 317 |
+
atol=0., rtol=1e-5):
|
| 318 |
+
"""Use Conjugate Gradient iteration to solve ``Ax = b``.
|
| 319 |
+
|
| 320 |
+
Parameters
|
| 321 |
+
----------
|
| 322 |
+
A : {sparse matrix, ndarray, LinearOperator}
|
| 323 |
+
The real or complex N-by-N matrix of the linear system.
|
| 324 |
+
``A`` must represent a hermitian, positive definite matrix.
|
| 325 |
+
Alternatively, ``A`` can be a linear operator which can
|
| 326 |
+
produce ``Ax`` using, e.g.,
|
| 327 |
+
``scipy.sparse.linalg.LinearOperator``.
|
| 328 |
+
b : ndarray
|
| 329 |
+
Right hand side of the linear system. Has shape (N,) or (N,1).
|
| 330 |
+
x0 : ndarray
|
| 331 |
+
Starting guess for the solution.
|
| 332 |
+
rtol, atol : float, optional
|
| 333 |
+
Parameters for the convergence test. For convergence,
|
| 334 |
+
``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
|
| 335 |
+
The default is ``atol=0.`` and ``rtol=1e-5``.
|
| 336 |
+
maxiter : integer
|
| 337 |
+
Maximum number of iterations. Iteration will stop after maxiter
|
| 338 |
+
steps even if the specified tolerance has not been achieved.
|
| 339 |
+
M : {sparse matrix, ndarray, LinearOperator}
|
| 340 |
+
Preconditioner for A. The preconditioner should approximate the
|
| 341 |
+
inverse of A. Effective preconditioning dramatically improves the
|
| 342 |
+
rate of convergence, which implies that fewer iterations are needed
|
| 343 |
+
to reach a given error tolerance.
|
| 344 |
+
callback : function
|
| 345 |
+
User-supplied function to call after each iteration. It is called
|
| 346 |
+
as callback(xk), where xk is the current solution vector.
|
| 347 |
+
tol : float, optional, deprecated
|
| 348 |
+
|
| 349 |
+
.. deprecated:: 1.12.0
|
| 350 |
+
`cg` keyword argument ``tol`` is deprecated in favor of ``rtol`` and
|
| 351 |
+
will be removed in SciPy 1.14.0.
|
| 352 |
+
|
| 353 |
+
Returns
|
| 354 |
+
-------
|
| 355 |
+
x : ndarray
|
| 356 |
+
The converged solution.
|
| 357 |
+
info : integer
|
| 358 |
+
Provides convergence information:
|
| 359 |
+
0 : successful exit
|
| 360 |
+
>0 : convergence to tolerance not achieved, number of iterations
|
| 361 |
+
|
| 362 |
+
Examples
|
| 363 |
+
--------
|
| 364 |
+
>>> import numpy as np
|
| 365 |
+
>>> from scipy.sparse import csc_matrix
|
| 366 |
+
>>> from scipy.sparse.linalg import cg
|
| 367 |
+
>>> P = np.array([[4, 0, 1, 0],
|
| 368 |
+
... [0, 5, 0, 0],
|
| 369 |
+
... [1, 0, 3, 2],
|
| 370 |
+
... [0, 0, 2, 4]])
|
| 371 |
+
>>> A = csc_matrix(P)
|
| 372 |
+
>>> b = np.array([-1, -0.5, -1, 2])
|
| 373 |
+
>>> x, exit_code = cg(A, b, atol=1e-5)
|
| 374 |
+
>>> print(exit_code) # 0 indicates successful convergence
|
| 375 |
+
0
|
| 376 |
+
>>> np.allclose(A.dot(x), b)
|
| 377 |
+
True
|
| 378 |
+
|
| 379 |
+
"""
|
| 380 |
+
A, M, x, b, postprocess = make_system(A, M, x0, b)
|
| 381 |
+
bnrm2 = np.linalg.norm(b)
|
| 382 |
+
|
| 383 |
+
atol, _ = _get_atol_rtol('cg', bnrm2, tol, atol, rtol)
|
| 384 |
+
|
| 385 |
+
if bnrm2 == 0:
|
| 386 |
+
return postprocess(b), 0
|
| 387 |
+
|
| 388 |
+
n = len(b)
|
| 389 |
+
|
| 390 |
+
if maxiter is None:
|
| 391 |
+
maxiter = n*10
|
| 392 |
+
|
| 393 |
+
dotprod = np.vdot if np.iscomplexobj(x) else np.dot
|
| 394 |
+
|
| 395 |
+
matvec = A.matvec
|
| 396 |
+
psolve = M.matvec
|
| 397 |
+
r = b - matvec(x) if x.any() else b.copy()
|
| 398 |
+
|
| 399 |
+
# Dummy value to initialize var, silences warnings
|
| 400 |
+
rho_prev, p = None, None
|
| 401 |
+
|
| 402 |
+
for iteration in range(maxiter):
|
| 403 |
+
if np.linalg.norm(r) < atol: # Are we done?
|
| 404 |
+
return postprocess(x), 0
|
| 405 |
+
|
| 406 |
+
z = psolve(r)
|
| 407 |
+
rho_cur = dotprod(r, z)
|
| 408 |
+
if iteration > 0:
|
| 409 |
+
beta = rho_cur / rho_prev
|
| 410 |
+
p *= beta
|
| 411 |
+
p += z
|
| 412 |
+
else: # First spin
|
| 413 |
+
p = np.empty_like(r)
|
| 414 |
+
p[:] = z[:]
|
| 415 |
+
|
| 416 |
+
q = matvec(p)
|
| 417 |
+
alpha = rho_cur / dotprod(p, q)
|
| 418 |
+
x += alpha*p
|
| 419 |
+
r -= alpha*q
|
| 420 |
+
rho_prev = rho_cur
|
| 421 |
+
|
| 422 |
+
if callback:
|
| 423 |
+
callback(x)
|
| 424 |
+
|
| 425 |
+
else: # for loop exhausted
|
| 426 |
+
# Return incomplete progress
|
| 427 |
+
return postprocess(x), maxiter
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
@_deprecate_positional_args(version="1.14")
|
| 431 |
+
def cgs(A, b, x0=None, *, tol=_NoValue, maxiter=None, M=None, callback=None,
|
| 432 |
+
atol=0., rtol=1e-5):
|
| 433 |
+
"""Use Conjugate Gradient Squared iteration to solve ``Ax = b``.
|
| 434 |
+
|
| 435 |
+
Parameters
|
| 436 |
+
----------
|
| 437 |
+
A : {sparse matrix, ndarray, LinearOperator}
|
| 438 |
+
The real-valued N-by-N matrix of the linear system.
|
| 439 |
+
Alternatively, ``A`` can be a linear operator which can
|
| 440 |
+
produce ``Ax`` using, e.g.,
|
| 441 |
+
``scipy.sparse.linalg.LinearOperator``.
|
| 442 |
+
b : ndarray
|
| 443 |
+
Right hand side of the linear system. Has shape (N,) or (N,1).
|
| 444 |
+
x0 : ndarray
|
| 445 |
+
Starting guess for the solution.
|
| 446 |
+
rtol, atol : float, optional
|
| 447 |
+
Parameters for the convergence test. For convergence,
|
| 448 |
+
``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
|
| 449 |
+
The default is ``atol=0.`` and ``rtol=1e-5``.
|
| 450 |
+
maxiter : integer
|
| 451 |
+
Maximum number of iterations. Iteration will stop after maxiter
|
| 452 |
+
steps even if the specified tolerance has not been achieved.
|
| 453 |
+
M : {sparse matrix, ndarray, LinearOperator}
|
| 454 |
+
Preconditioner for A. The preconditioner should approximate the
|
| 455 |
+
inverse of A. Effective preconditioning dramatically improves the
|
| 456 |
+
rate of convergence, which implies that fewer iterations are needed
|
| 457 |
+
to reach a given error tolerance.
|
| 458 |
+
callback : function
|
| 459 |
+
User-supplied function to call after each iteration. It is called
|
| 460 |
+
as callback(xk), where xk is the current solution vector.
|
| 461 |
+
tol : float, optional, deprecated
|
| 462 |
+
|
| 463 |
+
.. deprecated:: 1.12.0
|
| 464 |
+
`cgs` keyword argument ``tol`` is deprecated in favor of ``rtol``
|
| 465 |
+
and will be removed in SciPy 1.14.0.
|
| 466 |
+
|
| 467 |
+
Returns
|
| 468 |
+
-------
|
| 469 |
+
x : ndarray
|
| 470 |
+
The converged solution.
|
| 471 |
+
info : integer
|
| 472 |
+
Provides convergence information:
|
| 473 |
+
0 : successful exit
|
| 474 |
+
>0 : convergence to tolerance not achieved, number of iterations
|
| 475 |
+
<0 : parameter breakdown
|
| 476 |
+
|
| 477 |
+
Examples
|
| 478 |
+
--------
|
| 479 |
+
>>> import numpy as np
|
| 480 |
+
>>> from scipy.sparse import csc_matrix
|
| 481 |
+
>>> from scipy.sparse.linalg import cgs
|
| 482 |
+
>>> R = np.array([[4, 2, 0, 1],
|
| 483 |
+
... [3, 0, 0, 2],
|
| 484 |
+
... [0, 1, 1, 1],
|
| 485 |
+
... [0, 2, 1, 0]])
|
| 486 |
+
>>> A = csc_matrix(R)
|
| 487 |
+
>>> b = np.array([-1, -0.5, -1, 2])
|
| 488 |
+
>>> x, exit_code = cgs(A, b)
|
| 489 |
+
>>> print(exit_code) # 0 indicates successful convergence
|
| 490 |
+
0
|
| 491 |
+
>>> np.allclose(A.dot(x), b)
|
| 492 |
+
True
|
| 493 |
+
|
| 494 |
+
"""
|
| 495 |
+
A, M, x, b, postprocess = make_system(A, M, x0, b)
|
| 496 |
+
bnrm2 = np.linalg.norm(b)
|
| 497 |
+
|
| 498 |
+
atol, _ = _get_atol_rtol('cgs', bnrm2, tol, atol, rtol)
|
| 499 |
+
|
| 500 |
+
if bnrm2 == 0:
|
| 501 |
+
return postprocess(b), 0
|
| 502 |
+
|
| 503 |
+
n = len(b)
|
| 504 |
+
|
| 505 |
+
dotprod = np.vdot if np.iscomplexobj(x) else np.dot
|
| 506 |
+
|
| 507 |
+
if maxiter is None:
|
| 508 |
+
maxiter = n*10
|
| 509 |
+
|
| 510 |
+
matvec = A.matvec
|
| 511 |
+
psolve = M.matvec
|
| 512 |
+
|
| 513 |
+
rhotol = np.finfo(x.dtype.char).eps**2
|
| 514 |
+
|
| 515 |
+
r = b - matvec(x) if x.any() else b.copy()
|
| 516 |
+
|
| 517 |
+
rtilde = r.copy()
|
| 518 |
+
bnorm = np.linalg.norm(b)
|
| 519 |
+
if bnorm == 0:
|
| 520 |
+
bnorm = 1
|
| 521 |
+
|
| 522 |
+
# Dummy values to initialize vars, silence linter warnings
|
| 523 |
+
rho_prev, p, u, q = None, None, None, None
|
| 524 |
+
|
| 525 |
+
for iteration in range(maxiter):
|
| 526 |
+
rnorm = np.linalg.norm(r)
|
| 527 |
+
if rnorm < atol: # Are we done?
|
| 528 |
+
return postprocess(x), 0
|
| 529 |
+
|
| 530 |
+
rho_cur = dotprod(rtilde, r)
|
| 531 |
+
if np.abs(rho_cur) < rhotol: # Breakdown case
|
| 532 |
+
return postprocess, -10
|
| 533 |
+
|
| 534 |
+
if iteration > 0:
|
| 535 |
+
beta = rho_cur / rho_prev
|
| 536 |
+
|
| 537 |
+
# u = r + beta * q
|
| 538 |
+
# p = u + beta * (q + beta * p);
|
| 539 |
+
u[:] = r[:]
|
| 540 |
+
u += beta*q
|
| 541 |
+
|
| 542 |
+
p *= beta
|
| 543 |
+
p += q
|
| 544 |
+
p *= beta
|
| 545 |
+
p += u
|
| 546 |
+
|
| 547 |
+
else: # First spin
|
| 548 |
+
p = r.copy()
|
| 549 |
+
u = r.copy()
|
| 550 |
+
q = np.empty_like(r)
|
| 551 |
+
|
| 552 |
+
phat = psolve(p)
|
| 553 |
+
vhat = matvec(phat)
|
| 554 |
+
rv = dotprod(rtilde, vhat)
|
| 555 |
+
|
| 556 |
+
if rv == 0: # Dot product breakdown
|
| 557 |
+
return postprocess(x), -11
|
| 558 |
+
|
| 559 |
+
alpha = rho_cur / rv
|
| 560 |
+
q[:] = u[:]
|
| 561 |
+
q -= alpha*vhat
|
| 562 |
+
uhat = psolve(u + q)
|
| 563 |
+
x += alpha*uhat
|
| 564 |
+
|
| 565 |
+
# Due to numerical error build-up the actual residual is computed
|
| 566 |
+
# instead of the following two lines that were in the original
|
| 567 |
+
# FORTRAN templates, still using a single matvec.
|
| 568 |
+
|
| 569 |
+
# qhat = matvec(uhat)
|
| 570 |
+
# r -= alpha*qhat
|
| 571 |
+
r = b - matvec(x)
|
| 572 |
+
|
| 573 |
+
rho_prev = rho_cur
|
| 574 |
+
|
| 575 |
+
if callback:
|
| 576 |
+
callback(x)
|
| 577 |
+
|
| 578 |
+
else: # for loop exhausted
|
| 579 |
+
# Return incomplete progress
|
| 580 |
+
return postprocess(x), maxiter
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
@_deprecate_positional_args(version="1.14")
def gmres(A, b, x0=None, *, tol=_NoValue, restart=None, maxiter=None, M=None,
          callback=None, restrt=_NoValue, atol=0., callback_type=None,
          rtol=1e-5):
    """
    Use Generalized Minimal RESidual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution (a vector of zeros by default).
    atol, rtol : float
        Parameters for the convergence test. For convergence,
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
        The default is ``atol=0.`` and ``rtol=1e-5``.
    restart : int, optional
        Number of iterations between restarts. Larger values increase
        iteration cost, but may be necessary for convergence.
        If omitted, ``min(20, n)`` is used.
    maxiter : int, optional
        Maximum number of iterations (restart cycles). Iteration will stop
        after maxiter steps even if the specified tolerance has not been
        achieved. See `callback_type`.
    M : {sparse matrix, ndarray, LinearOperator}
        Inverse of the preconditioner of A. M should approximate the
        inverse of A and be easy to solve for (see Notes). Effective
        preconditioning dramatically improves the rate of convergence,
        which implies that fewer iterations are needed to reach a given
        error tolerance. By default, no preconditioner is used.
        In this implementation, left preconditioning is used,
        and the preconditioned residual is minimized. However, the final
        convergence is tested with respect to the ``b - A @ x`` residual.
    callback : function
        User-supplied function to call after each iteration. It is called
        as `callback(args)`, where `args` are selected by `callback_type`.
    callback_type : {'x', 'pr_norm', 'legacy'}, optional
        Callback function argument requested:
          - ``x``: current iterate (ndarray), called on every restart
          - ``pr_norm``: relative (preconditioned) residual norm (float),
            called on every inner iteration
          - ``legacy`` (default): same as ``pr_norm``, but also changes the
            meaning of `maxiter` to count inner iterations instead of restart
            cycles.

        This keyword has no effect if `callback` is not set.
    restrt : int, optional, deprecated

        .. deprecated:: 0.11.0
           `gmres` keyword argument ``restrt`` is deprecated in favor of
           ``restart`` and will be removed in SciPy 1.14.0.
    tol : float, optional, deprecated

        .. deprecated:: 1.12.0
           `gmres` keyword argument ``tol`` is deprecated in favor of ``rtol``
           and will be removed in SciPy 1.14.0

    Returns
    -------
    x : ndarray
        The converged solution.
    info : int
        Provides convergence information:
            0 : successful exit
            >0 : convergence to tolerance not achieved, number of iterations

    See Also
    --------
    LinearOperator

    Notes
    -----
    A preconditioner, P, is chosen such that P is close to A but easy to solve
    for. The preconditioner parameter required by this routine is
    ``M = P^-1``. The inverse should preferably not be calculated
    explicitly. Rather, use the following template to produce M::

      # Construct a linear operator that computes P^-1 @ x.
      import scipy.sparse.linalg as spla
      M_x = lambda x: spla.spsolve(P, x)
      M = spla.LinearOperator((n, n), M_x)

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import gmres
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = gmres(A, b, atol=1e-5)
    >>> print(exitCode)  # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """

    # Handle the deprecation frenzy
    if restrt not in (None, _NoValue) and restart:
        raise ValueError("Cannot specify both 'restart' and 'restrt'"
                         " keywords. Also 'rstrt' is deprecated."
                         " and will be removed in SciPy 1.14.0. Use "
                         "'restart' instead.")
    if restrt is not _NoValue:
        msg = ("'gmres' keyword argument 'restrt' is deprecated "
               "in favor of 'restart' and will be removed in SciPy"
               " 1.14.0. Until then, if set, 'rstrt' will override 'restart'."
               )
        warnings.warn(msg, DeprecationWarning, stacklevel=3)
        restart = restrt

    if callback is not None and callback_type is None:
        # Warn about 'callback_type' semantic changes.
        # Probably should be removed only in far future, Scipy 2.0 or so.
        msg = ("scipy.sparse.linalg.gmres called without specifying "
               "`callback_type`. The default value will be changed in"
               " a future release. For compatibility, specify a value "
               "for `callback_type` explicitly, e.g., "
               "``gmres(..., callback_type='pr_norm')``, or to retain the "
               "old behavior ``gmres(..., callback_type='legacy')``"
               )
        warnings.warn(msg, category=DeprecationWarning, stacklevel=3)

    if callback_type is None:
        callback_type = 'legacy'

    if callback_type not in ('x', 'pr_norm', 'legacy'):
        raise ValueError(f"Unknown callback_type: {callback_type!r}")

    # No callback means the per-iteration callback branches below are skipped.
    if callback is None:
        callback_type = None

    A, M, x, b, postprocess = make_system(A, M, x0, b)
    matvec = A.matvec
    psolve = M.matvec
    n = len(b)
    bnrm2 = np.linalg.norm(b)

    atol, _ = _get_atol_rtol('gmres', bnrm2, tol, atol, rtol)

    # b == 0: x = 0 is the exact solution.
    if bnrm2 == 0:
        return postprocess(b), 0

    eps = np.finfo(x.dtype.char).eps

    # Conjugated inner product for complex-valued systems.
    dotprod = np.vdot if np.iscomplexobj(x) else np.dot

    if maxiter is None:
        maxiter = n*10

    if restart is None:
        restart = 20
    restart = min(restart, n)

    Mb_nrm2 = np.linalg.norm(psolve(b))

    # ====================================================
    # =========== Tolerance control from gh-8400 =========
    # ====================================================
    # Tolerance passed to GMRESREVCOM applies to the inner
    # iteration and deals with the left-preconditioned
    # residual.
    ptol_max_factor = 1.
    ptol = Mb_nrm2 * min(ptol_max_factor, atol / bnrm2)
    presid = 0.
    # ====================================================
    # LAPACK routine generating Givens rotations (c, s, magnitude).
    lartg = get_lapack_funcs('lartg', dtype=x.dtype)

    # allocate internal variables
    # v: Krylov basis vectors (rows); h: Hessenberg matrix (transposed
    # layout, row per column of the classical H); givens: rotation params.
    v = np.empty([restart+1, n], dtype=x.dtype)
    h = np.zeros([restart, restart+1], dtype=x.dtype)
    givens = np.zeros([restart, 2], dtype=x.dtype)

    # legacy iteration count
    inner_iter = 0

    # Outer (restart-cycle) loop.
    for iteration in range(maxiter):
        if iteration == 0:
            r = b - matvec(x) if x.any() else b.copy()
            if np.linalg.norm(r) < atol:  # Are we done?
                return postprocess(x), 0

        # Start the Arnoldi process from the preconditioned residual.
        v[0, :] = psolve(r)
        tmp = np.linalg.norm(v[0, :])
        v[0, :] *= (1 / tmp)
        # RHS of the Hessenberg problem
        S = np.zeros(restart+1, dtype=x.dtype)
        S[0] = tmp

        breakdown = False
        # Inner (Arnoldi) loop building the Krylov basis.
        for col in range(restart):
            av = matvec(v[col, :])
            w = psolve(av)

            # Modified Gram-Schmidt
            h0 = np.linalg.norm(w)
            for k in range(col+1):
                tmp = dotprod(v[k, :], w)
                h[col, k] = tmp
                w -= tmp*v[k, :]

            h1 = np.linalg.norm(w)
            h[col, col + 1] = h1
            v[col + 1, :] = w[:]

            # Exact solution indicator
            if h1 <= eps*h0:
                h[col, col + 1] = 0
                breakdown = True
            else:
                v[col + 1, :] *= (1 / h1)

            # apply past Givens rotations to current h column
            for k in range(col):
                c, s = givens[k, 0], givens[k, 1]
                n0, n1 = h[col, [k, k+1]]
                h[col, [k, k + 1]] = [c*n0 + s*n1, -s.conj()*n0 + c*n1]

            # get and apply current rotation to h and S
            c, s, mag = lartg(h[col, col], h[col, col+1])
            givens[col, :] = [c, s]
            h[col, [col, col+1]] = mag, 0

            # S[col+1] component is always 0
            tmp = -np.conjugate(s)*S[col]
            S[[col, col + 1]] = [c*S[col], tmp]
            # |tmp| is the norm of the preconditioned residual.
            presid = np.abs(tmp)
            inner_iter += 1

            if callback_type in ('legacy', 'pr_norm'):
                callback(presid / bnrm2)
            # Legacy behavior
            if callback_type == 'legacy' and inner_iter == maxiter:
                break
            if presid <= ptol or breakdown:
                break

        # Solve h(col, col) upper triangular system and allow pseudo-solve
        # singular cases as in (but without the f2py copies):
        # y = trsv(h[:col+1, :col+1].T, S[:col+1])

        if h[col, col] == 0:
            S[col] = 0

        # Back substitution (h holds the transposed triangular factor).
        y = np.zeros([col+1], dtype=x.dtype)
        y[:] = S[:col+1]
        for k in range(col, 0, -1):
            if y[k] != 0:
                y[k] /= h[k, k]
                tmp = y[k]
                y[:k] -= tmp*h[k, :k]
        if y[0] != 0:
            y[0] /= h[0, 0]

        # Update the iterate with the Krylov-space correction.
        x += y @ v[:col+1, :]

        # True (unpreconditioned) residual for the convergence test.
        r = b - matvec(x)
        rnorm = np.linalg.norm(r)

        # Legacy exit
        if callback_type == 'legacy' and inner_iter == maxiter:
            return postprocess(x), 0 if rnorm <= atol else maxiter

        if callback_type == 'x':
            callback(x)

        if rnorm <= atol:
            break
        elif breakdown:
            # Reached breakdown (= exact solution), but the external
            # tolerance check failed. Bail out with failure.
            break
        elif presid <= ptol:
            # Inner loop passed but outer didn't
            ptol_max_factor = max(eps, 0.25 * ptol_max_factor)
        else:
            ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)

        # Adapt the inner tolerance for the next cycle (gh-8400 scheme).
        ptol = presid * min(ptol_max_factor, atol / rnorm)

    # NOTE(review): if ``maxiter == 0`` the loop body never runs and
    # ``rnorm`` is unbound here, raising NameError -- confirm callers
    # always pass ``maxiter >= 1`` or None.
    info = 0 if (rnorm <= atol) else maxiter
    return postprocess(x), info
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
@_deprecate_positional_args(version="1.14")
|
| 873 |
+
def qmr(A, b, x0=None, *, tol=_NoValue, maxiter=None, M1=None, M2=None,
|
| 874 |
+
callback=None, atol=0., rtol=1e-5):
|
| 875 |
+
"""Use Quasi-Minimal Residual iteration to solve ``Ax = b``.
|
| 876 |
+
|
| 877 |
+
Parameters
|
| 878 |
+
----------
|
| 879 |
+
A : {sparse matrix, ndarray, LinearOperator}
|
| 880 |
+
The real-valued N-by-N matrix of the linear system.
|
| 881 |
+
Alternatively, ``A`` can be a linear operator which can
|
| 882 |
+
produce ``Ax`` and ``A^T x`` using, e.g.,
|
| 883 |
+
``scipy.sparse.linalg.LinearOperator``.
|
| 884 |
+
b : ndarray
|
| 885 |
+
Right hand side of the linear system. Has shape (N,) or (N,1).
|
| 886 |
+
x0 : ndarray
|
| 887 |
+
Starting guess for the solution.
|
| 888 |
+
atol, rtol : float, optional
|
| 889 |
+
Parameters for the convergence test. For convergence,
|
| 890 |
+
``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
|
| 891 |
+
The default is ``atol=0.`` and ``rtol=1e-5``.
|
| 892 |
+
maxiter : integer
|
| 893 |
+
Maximum number of iterations. Iteration will stop after maxiter
|
| 894 |
+
steps even if the specified tolerance has not been achieved.
|
| 895 |
+
M1 : {sparse matrix, ndarray, LinearOperator}
|
| 896 |
+
Left preconditioner for A.
|
| 897 |
+
M2 : {sparse matrix, ndarray, LinearOperator}
|
| 898 |
+
Right preconditioner for A. Used together with the left
|
| 899 |
+
preconditioner M1. The matrix M1@A@M2 should have better
|
| 900 |
+
conditioned than A alone.
|
| 901 |
+
callback : function
|
| 902 |
+
User-supplied function to call after each iteration. It is called
|
| 903 |
+
as callback(xk), where xk is the current solution vector.
|
| 904 |
+
tol : float, optional, deprecated
|
| 905 |
+
|
| 906 |
+
.. deprecated:: 1.12.0
|
| 907 |
+
`qmr` keyword argument ``tol`` is deprecated in favor of ``rtol``
|
| 908 |
+
and will be removed in SciPy 1.14.0.
|
| 909 |
+
|
| 910 |
+
Returns
|
| 911 |
+
-------
|
| 912 |
+
x : ndarray
|
| 913 |
+
The converged solution.
|
| 914 |
+
info : integer
|
| 915 |
+
Provides convergence information:
|
| 916 |
+
0 : successful exit
|
| 917 |
+
>0 : convergence to tolerance not achieved, number of iterations
|
| 918 |
+
<0 : parameter breakdown
|
| 919 |
+
|
| 920 |
+
See Also
|
| 921 |
+
--------
|
| 922 |
+
LinearOperator
|
| 923 |
+
|
| 924 |
+
Examples
|
| 925 |
+
--------
|
| 926 |
+
>>> import numpy as np
|
| 927 |
+
>>> from scipy.sparse import csc_matrix
|
| 928 |
+
>>> from scipy.sparse.linalg import qmr
|
| 929 |
+
>>> A = csc_matrix([[3., 2., 0.], [1., -1., 0.], [0., 5., 1.]])
|
| 930 |
+
>>> b = np.array([2., 4., -1.])
|
| 931 |
+
>>> x, exitCode = qmr(A, b, atol=1e-5)
|
| 932 |
+
>>> print(exitCode) # 0 indicates successful convergence
|
| 933 |
+
0
|
| 934 |
+
>>> np.allclose(A.dot(x), b)
|
| 935 |
+
True
|
| 936 |
+
"""
|
| 937 |
+
A_ = A
|
| 938 |
+
A, M, x, b, postprocess = make_system(A, None, x0, b)
|
| 939 |
+
bnrm2 = np.linalg.norm(b)
|
| 940 |
+
|
| 941 |
+
atol, _ = _get_atol_rtol('qmr', bnrm2, tol, atol, rtol)
|
| 942 |
+
|
| 943 |
+
if bnrm2 == 0:
|
| 944 |
+
return postprocess(b), 0
|
| 945 |
+
|
| 946 |
+
if M1 is None and M2 is None:
|
| 947 |
+
if hasattr(A_, 'psolve'):
|
| 948 |
+
def left_psolve(b):
|
| 949 |
+
return A_.psolve(b, 'left')
|
| 950 |
+
|
| 951 |
+
def right_psolve(b):
|
| 952 |
+
return A_.psolve(b, 'right')
|
| 953 |
+
|
| 954 |
+
def left_rpsolve(b):
|
| 955 |
+
return A_.rpsolve(b, 'left')
|
| 956 |
+
|
| 957 |
+
def right_rpsolve(b):
|
| 958 |
+
return A_.rpsolve(b, 'right')
|
| 959 |
+
M1 = LinearOperator(A.shape,
|
| 960 |
+
matvec=left_psolve,
|
| 961 |
+
rmatvec=left_rpsolve)
|
| 962 |
+
M2 = LinearOperator(A.shape,
|
| 963 |
+
matvec=right_psolve,
|
| 964 |
+
rmatvec=right_rpsolve)
|
| 965 |
+
else:
|
| 966 |
+
def id(b):
|
| 967 |
+
return b
|
| 968 |
+
M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
|
| 969 |
+
M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)
|
| 970 |
+
|
| 971 |
+
n = len(b)
|
| 972 |
+
if maxiter is None:
|
| 973 |
+
maxiter = n*10
|
| 974 |
+
|
| 975 |
+
dotprod = np.vdot if np.iscomplexobj(x) else np.dot
|
| 976 |
+
|
| 977 |
+
rhotol = np.finfo(x.dtype.char).eps
|
| 978 |
+
betatol = rhotol
|
| 979 |
+
gammatol = rhotol
|
| 980 |
+
deltatol = rhotol
|
| 981 |
+
epsilontol = rhotol
|
| 982 |
+
xitol = rhotol
|
| 983 |
+
|
| 984 |
+
r = b - A.matvec(x) if x.any() else b.copy()
|
| 985 |
+
|
| 986 |
+
vtilde = r.copy()
|
| 987 |
+
y = M1.matvec(vtilde)
|
| 988 |
+
rho = np.linalg.norm(y)
|
| 989 |
+
wtilde = r.copy()
|
| 990 |
+
z = M2.rmatvec(wtilde)
|
| 991 |
+
xi = np.linalg.norm(z)
|
| 992 |
+
gamma, eta, theta = 1, -1, 0
|
| 993 |
+
v = np.empty_like(vtilde)
|
| 994 |
+
w = np.empty_like(wtilde)
|
| 995 |
+
|
| 996 |
+
# Dummy values to initialize vars, silence linter warnings
|
| 997 |
+
epsilon, q, d, p, s = None, None, None, None, None
|
| 998 |
+
|
| 999 |
+
for iteration in range(maxiter):
|
| 1000 |
+
if np.linalg.norm(r) < atol: # Are we done?
|
| 1001 |
+
return postprocess(x), 0
|
| 1002 |
+
if np.abs(rho) < rhotol: # rho breakdown
|
| 1003 |
+
return postprocess(x), -10
|
| 1004 |
+
if np.abs(xi) < xitol: # xi breakdown
|
| 1005 |
+
return postprocess(x), -15
|
| 1006 |
+
|
| 1007 |
+
v[:] = vtilde[:]
|
| 1008 |
+
v *= (1 / rho)
|
| 1009 |
+
y *= (1 / rho)
|
| 1010 |
+
w[:] = wtilde[:]
|
| 1011 |
+
w *= (1 / xi)
|
| 1012 |
+
z *= (1 / xi)
|
| 1013 |
+
delta = dotprod(z, y)
|
| 1014 |
+
|
| 1015 |
+
if np.abs(delta) < deltatol: # delta breakdown
|
| 1016 |
+
return postprocess(x), -13
|
| 1017 |
+
|
| 1018 |
+
ytilde = M2.matvec(y)
|
| 1019 |
+
ztilde = M1.rmatvec(z)
|
| 1020 |
+
|
| 1021 |
+
if iteration > 0:
|
| 1022 |
+
ytilde -= (xi * delta / epsilon) * p
|
| 1023 |
+
p[:] = ytilde[:]
|
| 1024 |
+
ztilde -= (rho * (delta / epsilon).conj()) * q
|
| 1025 |
+
q[:] = ztilde[:]
|
| 1026 |
+
else: # First spin
|
| 1027 |
+
p = ytilde.copy()
|
| 1028 |
+
q = ztilde.copy()
|
| 1029 |
+
|
| 1030 |
+
ptilde = A.matvec(p)
|
| 1031 |
+
epsilon = dotprod(q, ptilde)
|
| 1032 |
+
if np.abs(epsilon) < epsilontol: # epsilon breakdown
|
| 1033 |
+
return postprocess(x), -14
|
| 1034 |
+
|
| 1035 |
+
beta = epsilon / delta
|
| 1036 |
+
if np.abs(beta) < betatol: # beta breakdown
|
| 1037 |
+
return postprocess(x), -11
|
| 1038 |
+
|
| 1039 |
+
vtilde[:] = ptilde[:]
|
| 1040 |
+
vtilde -= beta*v
|
| 1041 |
+
y = M1.matvec(vtilde)
|
| 1042 |
+
|
| 1043 |
+
rho_prev = rho
|
| 1044 |
+
rho = np.linalg.norm(y)
|
| 1045 |
+
wtilde[:] = w[:]
|
| 1046 |
+
wtilde *= - beta.conj()
|
| 1047 |
+
wtilde += A.rmatvec(q)
|
| 1048 |
+
z = M2.rmatvec(wtilde)
|
| 1049 |
+
xi = np.linalg.norm(z)
|
| 1050 |
+
gamma_prev = gamma
|
| 1051 |
+
theta_prev = theta
|
| 1052 |
+
theta = rho / (gamma_prev * np.abs(beta))
|
| 1053 |
+
gamma = 1 / np.sqrt(1 + theta**2)
|
| 1054 |
+
|
| 1055 |
+
if np.abs(gamma) < gammatol: # gamma breakdown
|
| 1056 |
+
return postprocess(x), -12
|
| 1057 |
+
|
| 1058 |
+
eta *= -(rho_prev / beta) * (gamma / gamma_prev)**2
|
| 1059 |
+
|
| 1060 |
+
if iteration > 0:
|
| 1061 |
+
d *= (theta_prev * gamma) ** 2
|
| 1062 |
+
d += eta*p
|
| 1063 |
+
s *= (theta_prev * gamma) ** 2
|
| 1064 |
+
s += eta*ptilde
|
| 1065 |
+
else:
|
| 1066 |
+
d = p.copy()
|
| 1067 |
+
d *= eta
|
| 1068 |
+
s = ptilde.copy()
|
| 1069 |
+
s *= eta
|
| 1070 |
+
|
| 1071 |
+
x += d
|
| 1072 |
+
r -= s
|
| 1073 |
+
|
| 1074 |
+
if callback:
|
| 1075 |
+
callback(x)
|
| 1076 |
+
|
| 1077 |
+
else: # for loop exhausted
|
| 1078 |
+
# Return incomplete progress
|
| 1079 |
+
return postprocess(x), maxiter
|