from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
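
# Illustrative usage of the solve() convenience wrapper above. This is a sketch
# only, not part of the original module; the small tridiagonal test system is a
# made-up example. ScipyDirect is used when no solver class is given.
def _example_solve_usage():
    mtx = sps.diags([2.0, -1.0, -1.0], [0, -1, 1], shape=(5, 5), format='csr')
    rhs = nm.ones(5)
    sol = solve(mtx, rhs)
    return nm.allclose(mtx * sol, rhs)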
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
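
# Sanity-check sketch for _get_cs_matrix_hash(): hashing the CSR arrays in
# chunks gives the same SHA-1 digest as hashing the concatenated raw bytes.
# Illustrative only; the random test matrix is a made-up example.
def _example_check_matrix_hash():
    mtx = sps.random(50, 50, density=0.1, format='csr')
    whole = hashlib.sha1(mtx.indptr.tobytes() + mtx.indices.tobytes()
                         + mtx.data.tobytes()).hexdigest()
    return whole == _get_cs_matrix_hash(mtx)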
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
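
# Sketch of the bookkeeping done by @standard_call: when a status dict is
# supplied, it is filled with the elapsed time and the number of iterations
# (-1 for direct solvers, where it is unavailable). Using a plain dict as the
# solver configuration here is an assumption for illustration.
def _example_status_usage(mtx, rhs):
    status = {}
    ls = ScipyDirect({}, mtx=mtx, status=status)
    sol = ls(rhs)
    return sol, status['time'], status['n_iter']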
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
            is_umfpack = ('um' in aux
                          and hasattr(aux['um'], 'UMFPACK_OK'))
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
            raise ValueError('unknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
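
# Usage sketch for ScipyDirect with pre-factorization enabled (a plain-dict
# configuration is assumed for illustration). Several right-hand sides share
# the same matrix, so the factors computed by presolve() on the first call are
# reused for the subsequent ones.
def _example_presolve_usage(mtx, rhs_list):
    ls = ScipyDirect({'use_presolve' : True}, mtx=mtx)
    return [ls(rhs) for rhs in rhs_list]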
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
    The `eps_r` tolerance is both absolute and relative: the solvers stop
    when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
            It is called as setup_precond(mtx, context), where mtx is the
            matrix and context is a user-supplied context; it should return
            one of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
            as callback(xk), where xk is the current solution vector, except
            for the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
    # All iterative solvers in scipy.sparse.linalg pass a solution vector into
    # the callback, except those listed below, which take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
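
# Sketch of a user-supplied setup_precond callable for ScipyIterative: a simple
# Jacobi (diagonal) preconditioner wrapped in a LinearOperator. This illustrates
# the expected signature and return type; it is not part of the original module
# and assumes a nonzero matrix diagonal.
def _example_setup_jacobi_precond(mtx, context):
    import scipy.sparse.linalg as sla

    diag = mtx.diagonal()
    return sla.LinearOperator(mtx.shape, matvec=lambda x: x / diag)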
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
    'ruge_stuben_solver'. The `accel` parameter specifies the name of the
    Krylov solver used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
            as callback(xk), where xk is the current solution vector, except
            for the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
    # All iterative solvers in pyamg.krylov pass a solution vector into the
    # callback, except those listed below, which take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
        LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
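
# Illustrative PyAMGSolver configuration showing the 'method:' and 'solve:'
# prefixes described in the class parameters. The values are made-up
# assumptions; using this configuration requires pyamg.
_example_pyamg_conf = {
    'method' : 'smoothed_aggregation_solver',
    'accel' : 'cg',
    'eps_r' : 1e-10,
    'i_max' : 200,
    'method:max_levels' : 5,  # passed to smoothed_aggregation_solver().
    'solve:cycle' : 'V',  # passed to the multigrid solve() call.
}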
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
            It is called as setup_precond(mtx, context), where mtx is the
            matrix and context is a user-supplied context; it should return
            one of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
            as callback(xk), where xk is the current solution vector, except
            for the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
    # All iterative solvers in pyamg.krylov pass a solution vector into the
    # callback, except those listed below, which take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
        eps_r = get_default(eps_r, self.conf.eps_r)
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
solver name, that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = | get_default(i_max, self.conf.i_max) | sfepy.base.base.get_default |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
raise ValueError('uknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
solver name, that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
M=precond, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
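# A hedged usage sketch for the class above: a typical solver configuration
# as it might appear in a problem description file (the key name 'ls' and the
# tolerance values are illustrative assumptions):
#
#     solvers = {
#         'ls': ('ls.pyamg_krylov', {
#             'method': 'cg',
#             'i_max': 300,
#             'eps_r': 1e-10,
#         }),
#     }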
class PETScKrylovSolver(LinearSolver):
"""
PETSc Krylov subspace solver.
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScKrylovSolver.__init__()`) and allows passing in
PETSc matrices and vectors. Returns a (global) PETSc solution vector
instead of a (local) numpy array, when given a PETSc right-hand side
vector.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overridden when called by passing a `conf`
object.
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of the *preconditioned*
    residual.
"""
name = 'ls.petsc'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', None, False,
"""User-supplied function for the preconditioner initialization/setup.
         It is called as setup_precond(mtx, context), where mtx is the
         matrix and context is a user-supplied context; it should return an
         object with `setUp(self, pc)` and `apply(self, pc, x, y)` methods
         (a commented sketch follows below).
Has precedence over the `precond`/`sub_precond` parameters.
"""),
('precond', 'str', 'icc', False,
'The preconditioner.'),
('sub_precond', 'str', 'none', False,
'The preconditioner for matrix blocks (in parallel runs).'),
('precond_side', "{'left', 'right', 'symmetric', None}", None, False,
'The preconditioner side.'),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('eps_d', 'float', 1e5, False,
'The divergence tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the KSP solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Can be used to pass
all PETSc options supported by :func:`petsc.Options()`."""),
]
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
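    # A commented sketch (illustrative, not sfepy API) of the `setup_precond`
    # protocol described above: the returned object acts as a petsc4py Python
    # preconditioner context with `setUp()` and `apply()` methods. The class
    # name below is an assumption; the identity action keeps the sketch safe.
    #
    #     class IdentityPrecond(object):
    #         def setUp(self, pc):
    #             pass
    #         def apply(self, pc, x, y):
    #             x.copy(y)  # y <- x
    #
    #     def setup_precond(mtx, context):
    #         return IdentityPrecond()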
def __init__(self, conf, comm=None, context=None, **kwargs):
if comm is None:
from sfepy.parallel.parallel import init_petsc_args; init_petsc_args
from petsc4py import PETSc as petsc
converged_reasons = {}
for key, val in six.iteritems(petsc.KSP.ConvergedReason.__dict__):
if isinstance(val, int):
converged_reasons[val] = key
LinearSolver.__init__(self, conf, petsc=petsc, comm=comm,
converged_reasons=converged_reasons,
fields=None, ksp=None, pmtx=None,
context=context, **kwargs)
def set_field_split(self, field_ranges, comm=None):
"""
        Set up local PETSc ranges for fields to be used with the 'fieldsplit'
        preconditioner.
This function must be called before solving the linear system.
"""
comm = get_default(comm, self.comm)
self.fields = []
for key, rng in six.iteritems(field_ranges):
if isinstance(rng, slice):
rng = rng.start, rng.stop
size = rng[1] - rng[0]
field_is = self.petsc.IS().createStride(size, first=rng[0], step=1,
comm=comm)
self.fields.append((key, field_is))
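    # A hedged usage sketch for set_field_split(): `field_ranges` maps field
    # names to slices or (start, stop) tuples in the global DOF vector. The
    # field names and sizes below are illustrative only.
    #
    #     ls.set_field_split({'u': slice(0, 600), 'p': (600, 700)})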
def create_ksp(self, options=None, comm=None):
optDB = self.petsc.Options()
optDB['sub_pc_type'] = self.conf.sub_precond
if options is not None:
for key, val in six.iteritems(options):
optDB[key] = val
ksp = self.petsc.KSP()
ksp.create(comm)
ksp.setType(self.conf.method)
pc = ksp.getPC()
if self.conf.setup_precond is None:
pc.setType(self.conf.precond)
else:
pc.setType(pc.Type.PYTHON)
ksp.setFromOptions()
if (pc.type == 'fieldsplit'):
if self.fields is not None:
pc.setFieldSplitIS(*self.fields)
else:
msg = 'PETScKrylovSolver.set_field_split() has to be called!'
raise ValueError(msg)
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
return ksp
def create_petsc_matrix(self, mtx, comm=None):
if isinstance(mtx, self.petsc.Mat):
pmtx = mtx
else:
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat()
pmtx.createAIJ(mtx.shape, csr=(mtx.indptr, mtx.indices, mtx.data),
comm=comm)
return pmtx
@petsc_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None, context=None,
**kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
        eps_a = get_default(eps_a, self.conf.eps_a)
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
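# A hedged usage sketch for the convenience wrapper above; the small system
# here is illustrative only.
#
#     import numpy as nm
#     import scipy.sparse as sps
#     mtx = sps.csr_matrix(nm.array([[2.0, 1.0], [1.0, 3.0]]))
#     rhs = nm.array([3.0, 5.0])
#     x = solve(mtx, rhs)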
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
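# A hedged usage note for the decorator above: callers may pass a `status`
# dict to any decorated solver call and read the timing and iteration count
# afterwards (the solver instance name `ls` is an assumption).
#
#     status = {}
#     x = ls(rhs, status=status)
#     print(status['time'], status['n_iter'])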
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
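    # A hedged configuration sketch for this solver, as it might appear in a
    # problem description file (the key name 'ls' is conventional here, not
    # required):
    #
    #     solvers = {
    #         'ls': ('ls.scipy_direct', {'use_presolve': True}),
    #     }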
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
            raise ValueError('unknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
         It is called as setup_precond(mtx, context), where mtx is the
         matrix and context is a user-supplied context; it should return
         one of {sparse matrix, dense matrix, LinearOperator} (a commented
         sketch follows the parameter list below).
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
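    # A commented sketch (illustrative, not sfepy API) of a `setup_precond`
    # callable for this solver: it wraps an incomplete LU factorization as a
    # LinearOperator, one of the accepted return types listed above.
    #
    #     def setup_precond(mtx, context):
    #         from scipy.sparse.linalg import LinearOperator, spilu
    #         ilu = spilu(mtx.tocsc())
    #         return LinearOperator(mtx.shape, matvec=ilu.solve)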
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
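# Usage sketch (not part of the library API): ScipyIterative with a
# user-supplied `setup_precond` hook. The Jacobi (diagonal scaling)
# preconditioner below only illustrates the required contract - the hook
# receives (mtx, context) and returns an object usable as scipy's `M`,
# here a LinearOperator. The configuration keys mirror the parameters above;
# the helper names and the test matrix are assumptions of this example.
def _example_scipy_iterative():
    import numpy as nm
    import scipy.sparse as sps
    from scipy.sparse.linalg import LinearOperator

    def setup_jacobi(mtx, context):
        idiag = 1.0 / mtx.diagonal()
        return LinearOperator(mtx.shape, matvec=lambda x: idiag * x)

    mtx = sps.csr_matrix(nm.diag([1.0, 10.0, 100.0]))
    rhs = nm.ones(3)

    ls = ScipyIterative({'name' : 'ls_cg', 'kind' : 'ls.scipy_iterative',
                         'method' : 'cg', 'i_max' : 50, 'eps_r' : 1e-10,
                         'setup_precond' : setup_jacobi}, mtx=mtx)
    return ls(rhs)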
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the name of the
Krylov solver used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector; for the
gmres accelerator, however, the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass the current solution vector
# to the callback, except those listed below, which receive a residual
# vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
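# Usage sketch (not part of the library API): the 'method:' and 'solve:'
# prefixes described above route extra options either to the multigrid
# construction call or to the subsequent solve() call. The 1D Poisson-like
# matrix is a stand-in and pyamg has to be installed for this to run.
def _example_pyamg():
    import numpy as nm
    import scipy.sparse as sps

    n = 100
    mtx = sps.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
    rhs = nm.ones(n)

    ls = PyAMGSolver({'name' : 'ls_amg', 'kind' : 'ls.pyamg',
                      'method' : 'ruge_stuben_solver', 'accel' : 'cg',
                      'i_max' : 30, 'eps_r' : 1e-10,
                      'method:max_levels' : 5, 'solve:cycle' : 'V'}, mtx=mtx)
    return ls(rhs)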
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix and context is a user-supplied context; it should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector; for the
gmres method, however, the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass the current solution vector
# to the callback, except those listed below, which receive a residual
# vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
M=precond, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
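# Usage sketch (not part of the library API): PyAMGKrylovSolver preconditioned
# by an algebraic multigrid hierarchy supplied through `setup_precond`. The
# hook returns the LinearOperator built by pyamg's aspreconditioner(); the
# helper names and the test matrix are assumptions of this example.
def _example_pyamg_krylov():
    import numpy as nm
    import scipy.sparse as sps

    def setup_amg_precond(mtx, context):
        import pyamg
        ml = pyamg.smoothed_aggregation_solver(mtx)
        return ml.aspreconditioner()

    n = 100
    mtx = sps.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
    rhs = nm.ones(n)

    ls = PyAMGKrylovSolver({'name' : 'ls_amg_cg', 'kind' : 'ls.pyamg_krylov',
                            'method' : 'cg', 'i_max' : 30, 'eps_r' : 1e-10,
                            'setup_precond' : setup_amg_precond}, mtx=mtx)
    return ls(rhs)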
class PETScKrylovSolver(LinearSolver):
"""
PETSc Krylov subspace solver.
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScKrylovSolver.__init__()`) and allows passing in
PETSc matrices and vectors. Returns a (global) PETSc solution vector
instead of a (local) numpy array, when given a PETSc right-hand side
vector.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overridden when called by passing a `conf`
object.
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of the *preconditioned*
residual.
"""
name = 'ls.petsc'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return an
object with `setUp(self, pc)` and `apply(self, pc, x, y)` methods.
It takes precedence over the `precond`/`sub_precond` parameters.
"""),
('precond', 'str', 'icc', False,
'The preconditioner.'),
('sub_precond', 'str', 'none', False,
'The preconditioner for matrix blocks (in parallel runs).'),
('precond_side', "{'left', 'right', 'symmetric', None}", None, False,
'The preconditioner side.'),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('eps_d', 'float', 1e5, False,
'The divergence tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the KSP solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Can be used to pass
all PETSc options supported by :func:`petsc.Options()`."""),
]
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
def __init__(self, conf, comm=None, context=None, **kwargs):
if comm is None:
from sfepy.parallel.parallel import init_petsc_args; init_petsc_args
from petsc4py import PETSc as petsc
converged_reasons = {}
for key, val in six.iteritems(petsc.KSP.ConvergedReason.__dict__):
if isinstance(val, int):
converged_reasons[val] = key
LinearSolver.__init__(self, conf, petsc=petsc, comm=comm,
converged_reasons=converged_reasons,
fields=None, ksp=None, pmtx=None,
context=context, **kwargs)
def set_field_split(self, field_ranges, comm=None):
"""
Set up local PETSc ranges for fields to be used with the 'fieldsplit'
preconditioner.
This function must be called before solving the linear system.
"""
comm = get_default(comm, self.comm)
self.fields = []
for key, rng in six.iteritems(field_ranges):
if isinstance(rng, slice):
rng = rng.start, rng.stop
size = rng[1] - rng[0]
field_is = self.petsc.IS().createStride(size, first=rng[0], step=1,
comm=comm)
self.fields.append((key, field_is))
def create_ksp(self, options=None, comm=None):
optDB = self.petsc.Options()
optDB['sub_pc_type'] = self.conf.sub_precond
if options is not None:
for key, val in six.iteritems(options):
optDB[key] = val
ksp = self.petsc.KSP()
ksp.create(comm)
ksp.setType(self.conf.method)
pc = ksp.getPC()
if self.conf.setup_precond is None:
pc.setType(self.conf.precond)
else:
pc.setType(pc.Type.PYTHON)
ksp.setFromOptions()
if (pc.type == 'fieldsplit'):
if self.fields is not None:
pc.setFieldSplitIS(*self.fields)
else:
msg = 'PETScKrylovSolver.set_field_split() has to be called!'
raise ValueError(msg)
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
return ksp
def create_petsc_matrix(self, mtx, comm=None):
if isinstance(mtx, self.petsc.Mat):
pmtx = mtx
else:
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat()
pmtx.createAIJ(mtx.shape, csr=(mtx.indptr, mtx.indices, mtx.data),
comm=comm)
return pmtx
@petsc_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None, context=None,
**kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if (not is_new) and self.ksp is not None:
ksp = self.ksp
pmtx = self.pmtx
else:
pmtx = self.create_petsc_matrix(mtx, comm=comm)
ksp = self.create_ksp(options=solver_kwargs, comm=comm)
ksp.setOperators(pmtx)
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d,
max_it=i_max)
setup_precond = self.conf.setup_precond
if setup_precond is not None:
ksp.pc.setPythonContext(setup_precond(mtx, context))
ksp.setFromOptions()
self.mtx_digest = mtx_digest
self.ksp = ksp
self.pmtx = pmtx
if isinstance(rhs, self.petsc.Vec):
prhs = rhs
else:
prhs = pmtx.getVecLeft()
prhs[...] = rhs
if x0 is not None:
if isinstance(x0, self.petsc.Vec):
psol = x0
else:
psol = pmtx.getVecRight()
psol[...] = x0
ksp.setInitialGuessNonzero(True)
else:
psol = pmtx.getVecRight()
ksp.setInitialGuessNonzero(False)
ksp.solve(prhs, psol)
output('%s(%s, %s/proc) convergence: %s (%s, %d iterations)'
% (ksp.getType(), ksp.getPC().getType(), self.conf.sub_precond,
ksp.reason, self.converged_reasons[ksp.reason],
ksp.getIterationNumber()),
verbose=conf.verbose)
if isinstance(rhs, self.petsc.Vec):
sol = psol
else:
sol = psol[...].copy()
return sol
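# Usage sketch (not part of the library API): a serial PETSc run with the
# conjugate gradient method and incomplete Cholesky preconditioning. The
# extra 'ksp_norm_type' entry demonstrates the '*' passthrough to the PETSc
# options database and switches the convergence test to the unpreconditioned
# residual norm. Requires petsc4py; the matrix is a stand-in.
def _example_petsc_krylov():
    import numpy as nm
    import scipy.sparse as sps

    n = 100
    mtx = sps.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
    rhs = nm.ones(n)

    ls = PETScKrylovSolver({'name' : 'ls_petsc', 'kind' : 'ls.petsc',
                            'method' : 'cg', 'precond' : 'icc',
                            'i_max' : 200, 'eps_r' : 1e-10,
                            'ksp_norm_type' : 'unpreconditioned'}, mtx=mtx)
    return ls(rhs)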
class MUMPSSolver(LinearSolver):
"""
Interface to MUMPS solver.
"""
name = 'ls.mumps'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import sfepy.solvers.ls_mumps as mumps
self.mumps_ls = None
if not mumps.use_mpi:
raise AttributeError('No mpi4py found! Required by MUMPS solver.')
| mumps.load_mumps_libraries() | sfepy.solvers.ls_mumps.load_mumps_libraries |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
raise ValueError('uknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
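# A minimal usage sketch (hypothetical helper, not part of the module): it
# assumes dict-based configuration as accepted by the solve() wrapper above.
# With 'use_presolve' the matrix is factorized on the first call and the
# factors are reused; the `status` dict is filled by the standard_call
# decorator ('time' and 'n_iter', the latter being -1 for direct solvers).
def _example_scipy_direct():
    import numpy as nm
    import scipy.sparse as sps
    mtx = sps.csr_matrix(nm.array([[2.0, 1.0], [1.0, 3.0]]))
    rhs = nm.array([1.0, 2.0])
    status = {}
    ls = ScipyDirect({'use_presolve' : True}, mtx=mtx)
    sol = ls(rhs, status=status)
    return sol, status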
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
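# A hedged sketch of the setup_precond hook described in the parameters above:
# it is called as setup_precond(mtx, context) and here returns a simple Jacobi
# LinearOperator. The helper names are illustrative only and dict-based
# configuration is assumed, as in the solve() wrapper.
def _example_scipy_iterative_jacobi():
    import numpy as nm
    import scipy.sparse as sps
    from scipy.sparse.linalg import LinearOperator
    def setup_jacobi(mtx, context):
        diag = mtx.diagonal()
        return LinearOperator(mtx.shape, matvec=lambda x: x / diag)
    mtx = sps.diags([2.0, 3.0, 4.0], format='csr')
    rhs = nm.ones(3)
    ls = ScipyIterative({'method' : 'cg', 'i_max' : 100, 'eps_r' : 1e-10,
                         'setup_precond' : setup_jacobi})
    return ls(rhs, mtx=mtx)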
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
    'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
    solver name that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
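# A sketch of the 'method:' and 'solve:' option prefixes documented above:
# prefixed keys are stripped and forwarded either to the multigrid construction
# function or to its solve() call. The values are illustrative and pyamg must
# be installed; dict-based configuration is assumed as in solve().
def _example_pyamg_options():
    import numpy as nm
    from pyamg.gallery import poisson
    mtx = poisson((20, 20), format='csr')
    rhs = nm.ones(mtx.shape[0])
    ls = PyAMGSolver({'method' : 'smoothed_aggregation_solver',
                      'accel' : 'cg',
                      'eps_r' : 1e-10,
                      'method:max_levels' : 5,
                      'solve:cycle' : 'V'})
    return ls(rhs, mtx=mtx)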
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
M=precond, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
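# A sketch combining PyAMGKrylovSolver with an algebraic multigrid
# preconditioner built in the setup_precond hook (see the parameter
# description above); aspreconditioner() returns a LinearOperator. Names and
# values are illustrative only and pyamg must be installed.
def _example_pyamg_krylov_amg():
    import numpy as nm
    import pyamg
    from pyamg.gallery import poisson
    def setup_amg(mtx, context):
        return pyamg.smoothed_aggregation_solver(mtx).aspreconditioner()
    mtx = poisson((30, 30), format='csr')
    rhs = nm.ones(mtx.shape[0])
    ls = PyAMGKrylovSolver({'method' : 'cg', 'i_max' : 200, 'eps_r' : 1e-10,
                            'setup_precond' : setup_amg})
    return ls(rhs, mtx=mtx)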
class PETScKrylovSolver(LinearSolver):
"""
PETSc Krylov subspace solver.
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScKrylovSolver.__init__()`) and allows passing in
PETSc matrices and vectors. Returns a (global) PETSc solution vector
instead of a (local) numpy array, when given a PETSc right-hand side
vector.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overridden when called by passing a `conf`
object.
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of the *preconditioned*
    residual.
"""
name = 'ls.petsc'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return an
object with `setUp(self, pc)` and `apply(self, pc, x, y)` methods.
Has precedence over the `precond`/`sub_precond` parameters.
"""),
('precond', 'str', 'icc', False,
'The preconditioner.'),
('sub_precond', 'str', 'none', False,
'The preconditioner for matrix blocks (in parallel runs).'),
('precond_side', "{'left', 'right', 'symmetric', None}", None, False,
'The preconditioner side.'),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('eps_d', 'float', 1e5, False,
'The divergence tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the KSP solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Can be used to pass
all PETSc options supported by :func:`petsc.Options()`."""),
]
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
def __init__(self, conf, comm=None, context=None, **kwargs):
if comm is None:
from sfepy.parallel.parallel import init_petsc_args; init_petsc_args
from petsc4py import PETSc as petsc
converged_reasons = {}
for key, val in six.iteritems(petsc.KSP.ConvergedReason.__dict__):
if isinstance(val, int):
converged_reasons[val] = key
LinearSolver.__init__(self, conf, petsc=petsc, comm=comm,
converged_reasons=converged_reasons,
fields=None, ksp=None, pmtx=None,
context=context, **kwargs)
def set_field_split(self, field_ranges, comm=None):
"""
Setup local PETSc ranges for fields to be used with 'fieldsplit'
preconditioner.
This function must be called before solving the linear system.
"""
comm = get_default(comm, self.comm)
self.fields = []
for key, rng in six.iteritems(field_ranges):
if isinstance(rng, slice):
rng = rng.start, rng.stop
size = rng[1] - rng[0]
field_is = self.petsc.IS().createStride(size, first=rng[0], step=1,
comm=comm)
self.fields.append((key, field_is))
def create_ksp(self, options=None, comm=None):
optDB = self.petsc.Options()
optDB['sub_pc_type'] = self.conf.sub_precond
if options is not None:
for key, val in six.iteritems(options):
optDB[key] = val
ksp = self.petsc.KSP()
ksp.create(comm)
ksp.setType(self.conf.method)
pc = ksp.getPC()
if self.conf.setup_precond is None:
pc.setType(self.conf.precond)
else:
pc.setType(pc.Type.PYTHON)
ksp.setFromOptions()
if (pc.type == 'fieldsplit'):
if self.fields is not None:
pc.setFieldSplitIS(*self.fields)
else:
msg = 'PETScKrylovSolver.set_field_split() has to be called!'
raise ValueError(msg)
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
return ksp
def create_petsc_matrix(self, mtx, comm=None):
if isinstance(mtx, self.petsc.Mat):
pmtx = mtx
else:
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat()
pmtx.createAIJ(mtx.shape, csr=(mtx.indptr, mtx.indices, mtx.data),
comm=comm)
return pmtx
@petsc_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None, context=None,
**kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if (not is_new) and self.ksp is not None:
ksp = self.ksp
pmtx = self.pmtx
else:
pmtx = self.create_petsc_matrix(mtx, comm=comm)
ksp = self.create_ksp(options=solver_kwargs, comm=comm)
ksp.setOperators(pmtx)
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d,
max_it=i_max)
setup_precond = self.conf.setup_precond
if setup_precond is not None:
ksp.pc.setPythonContext(setup_precond(mtx, context))
ksp.setFromOptions()
self.mtx_digest = mtx_digest
self.ksp = ksp
self.pmtx = pmtx
if isinstance(rhs, self.petsc.Vec):
prhs = rhs
else:
prhs = pmtx.getVecLeft()
prhs[...] = rhs
if x0 is not None:
if isinstance(x0, self.petsc.Vec):
psol = x0
else:
psol = pmtx.getVecRight()
psol[...] = x0
ksp.setInitialGuessNonzero(True)
else:
psol = pmtx.getVecRight()
ksp.setInitialGuessNonzero(False)
ksp.solve(prhs, psol)
output('%s(%s, %s/proc) convergence: %s (%s, %d iterations)'
% (ksp.getType(), ksp.getPC().getType(), self.conf.sub_precond,
ksp.reason, self.converged_reasons[ksp.reason],
ksp.getIterationNumber()),
verbose=conf.verbose)
if isinstance(rhs, self.petsc.Vec):
sol = psol
else:
sol = psol[...].copy()
return sol
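# A hedged sketch of calling PETScKrylovSolver with plain scipy/numpy data:
# the keys below are the declared parameters; petsc4py must be installed and
# the sizes are illustrative. For a 'fieldsplit' preconditioner,
# set_field_split() has to be called before the solve, as noted above.
def _example_petsc_krylov():
    import numpy as nm
    import scipy.sparse as sps
    mtx = sps.diags([4.0] * 10, format='csr')
    rhs = nm.ones(10)
    ls = PETScKrylovSolver({'method' : 'cg', 'precond' : 'jacobi',
                            'i_max' : 100, 'eps_r' : 1e-10})
    # For pc_type 'fieldsplit' one would first call, e.g.:
    # ls.set_field_split({'u' : slice(0, 6), 'p' : slice(6, 10)})
    return ls(rhs, mtx=mtx)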
class MUMPSSolver(LinearSolver):
"""
Interface to MUMPS solver.
"""
name = 'ls.mumps'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import sfepy.solvers.ls_mumps as mumps
self.mumps_ls = None
if not mumps.use_mpi:
raise AttributeError('No mpi4py found! Required by MUMPS solver.')
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if not self.mumps_presolved:
self.presolve(mtx, presolve_flag=conf.use_presolve)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
self.mumps_ls(3) # solve
return out
def presolve(self, mtx, presolve_flag=False):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
if self.mumps_ls is None:
system = 'complex' if mtx.dtype.name.startswith('complex')\
else 'real'
is_sym = self.mumps.coo_is_symmetric(mtx)
mem_relax = self.conf.memory_relaxation
self.mumps_ls = self.mumps.MumpsSolver(system=system,
is_sym=is_sym,
mem_relax=mem_relax)
if is_new:
if self.conf.verbose:
self.mumps_ls.set_verbose()
self.mumps_ls.set_mtx_centralized(mtx)
self.mumps_ls(4) # analyze + factorize
if presolve_flag:
self.mumps_presolved = True
self.mtx_digest = mtx_digest
def __del__(self):
if self.mumps_ls is not None:
del(self.mumps_ls)
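# A sketch of reusing a MUMPS factorization for several right-hand sides via
# 'use_presolve' (see presolve() above): the first call analyzes and
# factorizes, subsequent calls only run the solve phase. Illustrative only;
# sfepy's MUMPS bindings and mpi4py must be available.
def _example_mumps_presolve():
    import numpy as nm
    import scipy.sparse as sps
    mtx = sps.csr_matrix(nm.array([[4.0, 1.0], [1.0, 3.0]]))
    ls = MUMPSSolver({'use_presolve' : True}, mtx=mtx)
    sol0 = ls(nm.array([1.0, 0.0]))
    sol1 = ls(nm.array([0.0, 1.0]))  # Reuses the factors from the first call.
    return sol0, sol1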
class MUMPSParallelSolver(LinearSolver):
"""
Interface to MUMPS parallel solver.
"""
name = 'ls.mumps_par'
_parameters = [
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import multiprocessing
import sfepy.solvers.ls_mumps as mumps
        mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
number_of_cpu=multiprocessing.cpu_count(),
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
from mpi4py import MPI
import sys
from sfepy import data_dir
import os.path as op
from tempfile import gettempdir
def tmpfile(fname):
return op.join(gettempdir(), fname)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
is_sym = self.mumps.coo_is_symmetric(mtx)
rr, cc, data = mtx.row + 1, mtx.col + 1, mtx.data
if is_sym:
idxs = nm.where(cc >= rr)[0] # upper triangular matrix
rr, cc, data = rr[idxs], cc[idxs], data[idxs]
n = mtx.shape[0]
nz = rr.shape[0]
flags = nm.memmap(tmpfile('vals_flags.array'), dtype='int32',
mode='w+', shape=(4,))
flags[0] = n
flags[1] = 1 if data.dtype.name.startswith('complex') else 0
flags[2] = int(is_sym)
flags[3] = int(self.conf.verbose)
idxs = nm.memmap(tmpfile('idxs.array'), dtype='int32',
mode='w+', shape=(2, nz))
idxs[0, :] = rr
idxs[1, :] = cc
dtype = {0: 'float64', 1: 'complex128'}[flags[1]]
vals_mtx = nm.memmap(tmpfile('vals_mtx.array'), dtype=dtype,
mode='w+', shape=(nz,))
vals_rhs = nm.memmap(tmpfile('vals_rhs.array'), dtype=dtype,
mode='w+', shape=(n,))
vals_mtx[:] = data
vals_rhs[:] = rhs
mumps_call = op.join(data_dir, 'sfepy', 'solvers',
'ls_mumps_parallel.py')
comm = MPI.COMM_SELF.Spawn(sys.executable, args=[mumps_call],
maxprocs=self.number_of_cpu)
comm.Disconnect()
out = nm.memmap(tmpfile('vals_x.array'), dtype=dtype, mode='r')
return out
class SchurMumps(MUMPSSolver):
r"""
Mumps Schur complement solver.
"""
name = 'ls.schur_mumps'
_parameters = MUMPSSolver._parameters + [
('schur_variables', 'list', None, True,
'The list of Schur variables.'),
]
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import scipy.linalg as sla
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
system = 'complex' if mtx.dtype.name.startswith('complex') else 'real'
self.mumps_ls = self.mumps.MumpsSolver(system=system)
if self.conf.verbose:
self.mumps_ls.set_verbose()
schur_list = []
for schur_var in conf.schur_variables:
slc = self.context.equations.variables.adi.indx[schur_var]
schur_list.append(nm.arange(slc.start, slc.stop, slc.step, dtype='i'))
self.mumps_ls.set_mtx_centralized(mtx)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
S, y2 = self.mumps_ls.get_schur(nm.hstack(schur_list))
x2 = sla.solve(S.T, y2) # solve the dense Schur system using scipy.linalg
return self.mumps_ls.expand_schur(x2)
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
    Allows defining multiple conjugate problems coupled via common variables.
"""
name = 'ls.cm_pb'
_parameters = ScipyDirect._parameters + [
('others', 'list', None, True,
'The list of auxiliary problem definition files.'),
('coupling_variables', 'list', None, True,
'The list of coupling variables.'),
]
def __init__(self, conf, context=None, **kwargs):
ScipyDirect.__init__(self, conf, context=context, **kwargs)
def init_subproblems(self, conf, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
# init subproblems
problem = self.context
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in six.itervalues(self.adi_indx):
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
        required, other = get_standard_keywords()
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
raise ValueError('uknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
solver name, that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
M=precond, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
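# A minimal sketch of a `setup_precond` callable matching the
# setup_precond(mtx, context) contract documented above: it returns a
# LinearOperator applying simple Jacobi (diagonal) scaling. It only
# illustrates the expected signature and return type, not a recommended
# preconditioner, and assumes `mtx` is a SciPy sparse matrix.
def _example_setup_jacobi_precond(mtx, context):
    from scipy.sparse.linalg import LinearOperator

    diag = mtx.diagonal()
    # Avoid division by zero for (unexpected) zero diagonal entries.
    diag = nm.where(nm.abs(diag) > 0.0, diag, 1.0)

    def matvec(vec):
        return vec / diag

    return LinearOperator(mtx.shape, matvec=matvec, dtype=mtx.dtype)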
class PETScKrylovSolver(LinearSolver):
"""
PETSc Krylov subspace solver.
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScKrylovSolver.__init__()`) and allows passing in
PETSc matrices and vectors. Returns a (global) PETSc solution vector
instead of a (local) numpy array, when given a PETSc right-hand side
vector.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overridden when called by passing a `conf`
object.
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of the *preconditioned*
    residual.
"""
name = 'ls.petsc'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return an
object with `setUp(self, pc)` and `apply(self, pc, x, y)` methods.
Has precedence over the `precond`/`sub_precond` parameters.
"""),
('precond', 'str', 'icc', False,
'The preconditioner.'),
('sub_precond', 'str', 'none', False,
'The preconditioner for matrix blocks (in parallel runs).'),
('precond_side', "{'left', 'right', 'symmetric', None}", None, False,
'The preconditioner side.'),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('eps_d', 'float', 1e5, False,
'The divergence tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the KSP solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Can be used to pass
all PETSc options supported by :func:`petsc.Options()`."""),
]
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
def __init__(self, conf, comm=None, context=None, **kwargs):
if comm is None:
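            # Importing the parallel module initializes petsc4py (PETSc
            # arguments) as a side effect before PETSc is imported below;
            # the trailing bare name only marks the import as used.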
from sfepy.parallel.parallel import init_petsc_args; init_petsc_args
from petsc4py import PETSc as petsc
converged_reasons = {}
for key, val in six.iteritems(petsc.KSP.ConvergedReason.__dict__):
if isinstance(val, int):
converged_reasons[val] = key
LinearSolver.__init__(self, conf, petsc=petsc, comm=comm,
converged_reasons=converged_reasons,
fields=None, ksp=None, pmtx=None,
context=context, **kwargs)
def set_field_split(self, field_ranges, comm=None):
"""
        Set up the local PETSc ranges for fields to be used with the
        'fieldsplit' preconditioner.
        This function must be called before solving the linear system.
"""
comm = get_default(comm, self.comm)
self.fields = []
for key, rng in six.iteritems(field_ranges):
if isinstance(rng, slice):
rng = rng.start, rng.stop
size = rng[1] - rng[0]
field_is = self.petsc.IS().createStride(size, first=rng[0], step=1,
comm=comm)
self.fields.append((key, field_is))
def create_ksp(self, options=None, comm=None):
optDB = self.petsc.Options()
optDB['sub_pc_type'] = self.conf.sub_precond
if options is not None:
for key, val in six.iteritems(options):
optDB[key] = val
ksp = self.petsc.KSP()
ksp.create(comm)
ksp.setType(self.conf.method)
pc = ksp.getPC()
if self.conf.setup_precond is None:
pc.setType(self.conf.precond)
else:
pc.setType(pc.Type.PYTHON)
ksp.setFromOptions()
if (pc.type == 'fieldsplit'):
if self.fields is not None:
pc.setFieldSplitIS(*self.fields)
else:
msg = 'PETScKrylovSolver.set_field_split() has to be called!'
raise ValueError(msg)
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
return ksp
def create_petsc_matrix(self, mtx, comm=None):
if isinstance(mtx, self.petsc.Mat):
pmtx = mtx
else:
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat()
pmtx.createAIJ(mtx.shape, csr=(mtx.indptr, mtx.indices, mtx.data),
comm=comm)
return pmtx
@petsc_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None, context=None,
**kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if (not is_new) and self.ksp is not None:
ksp = self.ksp
pmtx = self.pmtx
else:
pmtx = self.create_petsc_matrix(mtx, comm=comm)
ksp = self.create_ksp(options=solver_kwargs, comm=comm)
ksp.setOperators(pmtx)
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d,
max_it=i_max)
setup_precond = self.conf.setup_precond
if setup_precond is not None:
ksp.pc.setPythonContext(setup_precond(mtx, context))
ksp.setFromOptions()
self.mtx_digest = mtx_digest
self.ksp = ksp
self.pmtx = pmtx
if isinstance(rhs, self.petsc.Vec):
prhs = rhs
else:
prhs = pmtx.getVecLeft()
prhs[...] = rhs
if x0 is not None:
if isinstance(x0, self.petsc.Vec):
psol = x0
else:
psol = pmtx.getVecRight()
psol[...] = x0
ksp.setInitialGuessNonzero(True)
else:
psol = pmtx.getVecRight()
ksp.setInitialGuessNonzero(False)
ksp.solve(prhs, psol)
output('%s(%s, %s/proc) convergence: %s (%s, %d iterations)'
% (ksp.getType(), ksp.getPC().getType(), self.conf.sub_precond,
ksp.reason, self.converged_reasons[ksp.reason],
ksp.getIterationNumber()),
verbose=conf.verbose)
if isinstance(rhs, self.petsc.Vec):
sol = psol
else:
sol = psol[...].copy()
return sol
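# A minimal sketch of a `setup_precond` callable for PETScKrylovSolver: the
# returned object provides the setUp()/apply() methods expected from a PETSc
# 'python' preconditioner context. The Jacobi scaling used here only
# illustrates the interface; it assumes `mtx` is a SciPy sparse matrix (for
# a PETSc Mat, mtx.getDiagonal() would be needed instead).
def _example_setup_petsc_jacobi_precond(mtx, context):

    class JacobiPCContext(object):

        def setUp(self, pc):
            # Cache the inverse of the matrix diagonal.
            diag = mtx.diagonal()
            diag = nm.where(nm.abs(diag) > 0.0, diag, 1.0)
            self.idiag = 1.0 / diag

        def apply(self, pc, x, y):
            # y := D^{-1} x on the local vector arrays.
            y.setArray(x.getArray() * self.idiag)

    return JacobiPCContext()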
class MUMPSSolver(LinearSolver):
"""
Interface to MUMPS solver.
"""
name = 'ls.mumps'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import sfepy.solvers.ls_mumps as mumps
self.mumps_ls = None
if not mumps.use_mpi:
raise AttributeError('No mpi4py found! Required by MUMPS solver.')
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if not self.mumps_presolved:
self.presolve(mtx, presolve_flag=conf.use_presolve)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
self.mumps_ls(3) # solve
return out
def presolve(self, mtx, presolve_flag=False):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
if self.mumps_ls is None:
system = 'complex' if mtx.dtype.name.startswith('complex')\
else 'real'
is_sym = self.mumps.coo_is_symmetric(mtx)
mem_relax = self.conf.memory_relaxation
self.mumps_ls = self.mumps.MumpsSolver(system=system,
is_sym=is_sym,
mem_relax=mem_relax)
if is_new:
if self.conf.verbose:
self.mumps_ls.set_verbose()
self.mumps_ls.set_mtx_centralized(mtx)
self.mumps_ls(4) # analyze + factorize
if presolve_flag:
self.mumps_presolved = True
self.mtx_digest = mtx_digest
def __del__(self):
if self.mumps_ls is not None:
del(self.mumps_ls)
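# A minimal sketch of reusing the MUMPS factorization across several solves:
# with `use_presolve` set, the analysis/factorization (job 4) runs only when
# the matrix changes and subsequent calls perform the solve (job 3) only.
# Constructing the solver directly from a plain dict mirrors the solve()
# convenience wrapper above and is meant for illustration only.
def _example_mumps_repeated_solves(mtx, rhs_list):
    ls = MUMPSSolver({'use_presolve' : True}, mtx=mtx)
    return [ls(rhs) for rhs in rhs_list]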
class MUMPSParallelSolver(LinearSolver):
"""
Interface to MUMPS parallel solver.
"""
name = 'ls.mumps_par'
_parameters = [
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import multiprocessing
import sfepy.solvers.ls_mumps as mumps
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
number_of_cpu=multiprocessing.cpu_count(),
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
from mpi4py import MPI
import sys
from sfepy import data_dir
import os.path as op
from tempfile import gettempdir
def tmpfile(fname):
return op.join(gettempdir(), fname)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
is_sym = self.mumps.coo_is_symmetric(mtx)
rr, cc, data = mtx.row + 1, mtx.col + 1, mtx.data
if is_sym:
idxs = nm.where(cc >= rr)[0] # upper triangular matrix
rr, cc, data = rr[idxs], cc[idxs], data[idxs]
n = mtx.shape[0]
nz = rr.shape[0]
flags = nm.memmap(tmpfile('vals_flags.array'), dtype='int32',
mode='w+', shape=(4,))
flags[0] = n
flags[1] = 1 if data.dtype.name.startswith('complex') else 0
flags[2] = int(is_sym)
flags[3] = int(self.conf.verbose)
idxs = nm.memmap(tmpfile('idxs.array'), dtype='int32',
mode='w+', shape=(2, nz))
idxs[0, :] = rr
idxs[1, :] = cc
dtype = {0: 'float64', 1: 'complex128'}[flags[1]]
vals_mtx = nm.memmap(tmpfile('vals_mtx.array'), dtype=dtype,
mode='w+', shape=(nz,))
vals_rhs = nm.memmap(tmpfile('vals_rhs.array'), dtype=dtype,
mode='w+', shape=(n,))
vals_mtx[:] = data
vals_rhs[:] = rhs
mumps_call = op.join(data_dir, 'sfepy', 'solvers',
'ls_mumps_parallel.py')
comm = MPI.COMM_SELF.Spawn(sys.executable, args=[mumps_call],
maxprocs=self.number_of_cpu)
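        # Disconnect() is collective: it returns only after the spawned
        # solver processes have finished and disconnected, so the result
        # written by them is available below.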
comm.Disconnect()
out = nm.memmap(tmpfile('vals_x.array'), dtype=dtype, mode='r')
return out
class SchurMumps(MUMPSSolver):
r"""
    MUMPS Schur complement solver.
"""
name = 'ls.schur_mumps'
_parameters = MUMPSSolver._parameters + [
('schur_variables', 'list', None, True,
'The list of Schur variables.'),
]
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import scipy.linalg as sla
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
system = 'complex' if mtx.dtype.name.startswith('complex') else 'real'
self.mumps_ls = self.mumps.MumpsSolver(system=system)
if self.conf.verbose:
self.mumps_ls.set_verbose()
schur_list = []
for schur_var in conf.schur_variables:
slc = self.context.equations.variables.adi.indx[schur_var]
schur_list.append(nm.arange(slc.start, slc.stop, slc.step, dtype='i'))
self.mumps_ls.set_mtx_centralized(mtx)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
S, y2 = self.mumps_ls.get_schur(nm.hstack(schur_list))
x2 = sla.solve(S.T, y2) # solve the dense Schur system using scipy.linalg
return self.mumps_ls.expand_schur(x2)
class MultiProblem(ScipyDirect):
r"""
    Conjugate multiple problems.
    Allows defining and solving several problems coupled together through
    the given coupling variables.
"""
name = 'ls.cm_pb'
_parameters = ScipyDirect._parameters + [
('others', 'list', None, True,
'The list of auxiliary problem definition files.'),
('coupling_variables', 'list', None, True,
'The list of coupling variables.'),
]
def __init__(self, conf, context=None, **kwargs):
ScipyDirect.__init__(self, conf, context=context, **kwargs)
def init_subproblems(self, conf, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
# init subproblems
problem = self.context
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in six.itervalues(self.adi_indx):
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
        master_prefix = output.get_output_prefix()
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
| assert_(x0.shape[0] == rhs.shape[0]) | sfepy.base.base.assert_ |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
| assert_(xshape[0] == rshape[0]) | sfepy.base.base.assert_ |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
raise ValueError('uknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
| output(msg, verbose=conf.verbose > 1) | sfepy.base.base.output |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
raise ValueError('uknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
solver name, that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
| output(msg, verbose=conf.verbose > 1) | sfepy.base.base.output |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
raise ValueError('uknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
solver name, that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
| output(msg, verbose=conf.verbose > 1) | sfepy.base.base.output |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
            raise ValueError('unknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
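# --- Illustrative sketch (not part of the original module) -----------------
# Typical selection of this solver in a problem description file; the key
# 'ls' is an arbitrary user-chosen name and the option values are examples.
_example_scipy_direct_conf = {
    'ls': ('ls.scipy_direct', {'method': 'auto', 'use_presolve': True}),
}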
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
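# --- Illustrative sketch (not part of the original module) -----------------
# A user-supplied setup_precond() callback for the solver above, here a
# simple Jacobi (diagonal) preconditioner; the `context` argument is unused.
def _example_jacobi_precond(mtx, context):
    import scipy.sparse.linalg as ssl
    diag = mtx.diagonal()
    return ssl.LinearOperator(mtx.shape, matvec=lambda x: x / diag,
                              dtype=mtx.dtype)
# It would be passed via the configuration, e.g.
# 'ls': ('ls.scipy_iterative', {'method': 'cg',
#                               'setup_precond': _example_jacobi_precond}).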
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
solver name, that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
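# --- Illustrative sketch (not part of the original module) -----------------
# Passing construction-time and solve-time options to the solver above via
# the documented 'method:' and 'solve:' prefixes; the values are examples.
_example_pyamg_conf = {
    'ls': ('ls.pyamg', {
        'method': 'smoothed_aggregation_solver',
        'accel': 'cg',
        'eps_r': 1e-10,
        'method:max_levels': 5,
        'solve:cycle': 'V',
    }),
}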
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
M=precond, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PETScKrylovSolver(LinearSolver):
"""
PETSc Krylov subspace solver.
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScKrylovSolver.__init__()`) and allows passing in
PETSc matrices and vectors. Returns a (global) PETSc solution vector
instead of a (local) numpy array, when given a PETSc right-hand side
vector.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overridden when called by passing a `conf`
object.
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return an
object with `setUp(self, pc)` and `apply(self, pc, x, y)` methods.
Has precedence over the `precond`/`sub_precond` parameters.
"""),
('precond', 'str', 'icc', False,
'The preconditioner.'),
('sub_precond', 'str', 'none', False,
'The preconditioner for matrix blocks (in parallel runs).'),
('precond_side', "{'left', 'right', 'symmetric', None}", None, False,
'The preconditioner side.'),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('eps_d', 'float', 1e5, False,
'The divergence tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the KSP solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Can be used to pass
all PETSc options supported by :func:`petsc.Options()`."""),
]
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
def __init__(self, conf, comm=None, context=None, **kwargs):
if comm is None:
from sfepy.parallel.parallel import init_petsc_args; init_petsc_args
from petsc4py import PETSc as petsc
converged_reasons = {}
for key, val in six.iteritems(petsc.KSP.ConvergedReason.__dict__):
if isinstance(val, int):
converged_reasons[val] = key
LinearSolver.__init__(self, conf, petsc=petsc, comm=comm,
converged_reasons=converged_reasons,
fields=None, ksp=None, pmtx=None,
context=context, **kwargs)
def set_field_split(self, field_ranges, comm=None):
"""
Setup local PETSc ranges for fields to be used with 'fieldsplit'
preconditioner.
This function must be called before solving the linear system.
"""
comm = get_default(comm, self.comm)
self.fields = []
for key, rng in six.iteritems(field_ranges):
if isinstance(rng, slice):
rng = rng.start, rng.stop
size = rng[1] - rng[0]
field_is = self.petsc.IS().createStride(size, first=rng[0], step=1,
comm=comm)
self.fields.append((key, field_is))
def create_ksp(self, options=None, comm=None):
optDB = self.petsc.Options()
optDB['sub_pc_type'] = self.conf.sub_precond
if options is not None:
for key, val in six.iteritems(options):
optDB[key] = val
ksp = self.petsc.KSP()
ksp.create(comm)
ksp.setType(self.conf.method)
pc = ksp.getPC()
if self.conf.setup_precond is None:
pc.setType(self.conf.precond)
else:
pc.setType(pc.Type.PYTHON)
ksp.setFromOptions()
if (pc.type == 'fieldsplit'):
if self.fields is not None:
pc.setFieldSplitIS(*self.fields)
else:
msg = 'PETScKrylovSolver.set_field_split() has to be called!'
raise ValueError(msg)
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
return ksp
def create_petsc_matrix(self, mtx, comm=None):
if isinstance(mtx, self.petsc.Mat):
pmtx = mtx
else:
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat()
pmtx.createAIJ(mtx.shape, csr=(mtx.indptr, mtx.indices, mtx.data),
comm=comm)
return pmtx
@petsc_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None, context=None,
**kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if (not is_new) and self.ksp is not None:
ksp = self.ksp
pmtx = self.pmtx
else:
pmtx = self.create_petsc_matrix(mtx, comm=comm)
ksp = self.create_ksp(options=solver_kwargs, comm=comm)
ksp.setOperators(pmtx)
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d,
max_it=i_max)
setup_precond = self.conf.setup_precond
if setup_precond is not None:
ksp.pc.setPythonContext(setup_precond(mtx, context))
ksp.setFromOptions()
self.mtx_digest = mtx_digest
self.ksp = ksp
self.pmtx = pmtx
if isinstance(rhs, self.petsc.Vec):
prhs = rhs
else:
prhs = pmtx.getVecLeft()
prhs[...] = rhs
if x0 is not None:
if isinstance(x0, self.petsc.Vec):
psol = x0
else:
psol = pmtx.getVecRight()
psol[...] = x0
ksp.setInitialGuessNonzero(True)
else:
psol = pmtx.getVecRight()
ksp.setInitialGuessNonzero(False)
ksp.solve(prhs, psol)
output('%s(%s, %s/proc) convergence: %s (%s, %d iterations)'
% (ksp.getType(), ksp.getPC().getType(), self.conf.sub_precond,
ksp.reason, self.converged_reasons[ksp.reason],
ksp.getIterationNumber()),
verbose=conf.verbose)
if isinstance(rhs, self.petsc.Vec):
sol = psol
else:
sol = psol[...].copy()
return sol
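# --- Illustrative sketch (not part of the original module) -----------------
# A minimal Python preconditioner context of the shape expected by the
# 'setup_precond' parameter above: an object with setUp() and apply()
# methods. Here apply() just copies x into y (the identity preconditioner).
class _ExampleIdentityPC(object):
    def setUp(self, pc):
        pass
    def apply(self, pc, x, y):
        x.copy(y)
def _example_petsc_setup_precond(mtx, context):
    return _ExampleIdentityPC()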
class MUMPSSolver(LinearSolver):
"""
Interface to MUMPS solver.
"""
name = 'ls.mumps'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import sfepy.solvers.ls_mumps as mumps
self.mumps_ls = None
if not mumps.use_mpi:
raise AttributeError('No mpi4py found! Required by MUMPS solver.')
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if not self.mumps_presolved:
self.presolve(mtx, presolve_flag=conf.use_presolve)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
self.mumps_ls(3) # solve
return out
def presolve(self, mtx, presolve_flag=False):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
if self.mumps_ls is None:
system = 'complex' if mtx.dtype.name.startswith('complex')\
else 'real'
is_sym = self.mumps.coo_is_symmetric(mtx)
mem_relax = self.conf.memory_relaxation
self.mumps_ls = self.mumps.MumpsSolver(system=system,
is_sym=is_sym,
mem_relax=mem_relax)
if is_new:
if self.conf.verbose:
self.mumps_ls.set_verbose()
self.mumps_ls.set_mtx_centralized(mtx)
self.mumps_ls(4) # analyze + factorize
if presolve_flag:
self.mumps_presolved = True
self.mtx_digest = mtx_digest
def __del__(self):
if self.mumps_ls is not None:
del(self.mumps_ls)
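# --- Illustrative sketch (not part of the original module) -----------------
# Example configuration entry for the MUMPS solver above; the option values
# are illustrative only.
_example_mumps_conf = {
    'ls': ('ls.mumps', {'use_presolve': True, 'memory_relaxation': 40}),
}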
class MUMPSParallelSolver(LinearSolver):
"""
Interface to MUMPS parallel solver.
"""
name = 'ls.mumps_par'
_parameters = [
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import multiprocessing
import sfepy.solvers.ls_mumps as mumps
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
number_of_cpu=multiprocessing.cpu_count(),
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
from mpi4py import MPI
import sys
from sfepy import data_dir
import os.path as op
from tempfile import gettempdir
def tmpfile(fname):
return op.join(gettempdir(), fname)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
is_sym = self.mumps.coo_is_symmetric(mtx)
rr, cc, data = mtx.row + 1, mtx.col + 1, mtx.data
if is_sym:
idxs = nm.where(cc >= rr)[0] # upper triangular matrix
rr, cc, data = rr[idxs], cc[idxs], data[idxs]
n = mtx.shape[0]
nz = rr.shape[0]
flags = nm.memmap(tmpfile('vals_flags.array'), dtype='int32',
mode='w+', shape=(4,))
flags[0] = n
flags[1] = 1 if data.dtype.name.startswith('complex') else 0
flags[2] = int(is_sym)
flags[3] = int(self.conf.verbose)
idxs = nm.memmap(tmpfile('idxs.array'), dtype='int32',
mode='w+', shape=(2, nz))
idxs[0, :] = rr
idxs[1, :] = cc
dtype = {0: 'float64', 1: 'complex128'}[flags[1]]
vals_mtx = nm.memmap(tmpfile('vals_mtx.array'), dtype=dtype,
mode='w+', shape=(nz,))
vals_rhs = nm.memmap(tmpfile('vals_rhs.array'), dtype=dtype,
mode='w+', shape=(n,))
vals_mtx[:] = data
vals_rhs[:] = rhs
mumps_call = op.join(data_dir, 'sfepy', 'solvers',
'ls_mumps_parallel.py')
comm = MPI.COMM_SELF.Spawn(sys.executable, args=[mumps_call],
maxprocs=self.number_of_cpu)
comm.Disconnect()
out = nm.memmap(tmpfile('vals_x.array'), dtype=dtype, mode='r')
return out
class SchurMumps(MUMPSSolver):
r"""
Mumps Schur complement solver.
"""
name = 'ls.schur_mumps'
_parameters = MUMPSSolver._parameters + [
('schur_variables', 'list', None, True,
'The list of Schur variables.'),
]
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import scipy.linalg as sla
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
system = 'complex' if mtx.dtype.name.startswith('complex') else 'real'
self.mumps_ls = self.mumps.MumpsSolver(system=system)
if self.conf.verbose:
self.mumps_ls.set_verbose()
schur_list = []
for schur_var in conf.schur_variables:
slc = self.context.equations.variables.adi.indx[schur_var]
schur_list.append(nm.arange(slc.start, slc.stop, slc.step, dtype='i'))
self.mumps_ls.set_mtx_centralized(mtx)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
S, y2 = self.mumps_ls.get_schur(nm.hstack(schur_list))
x2 = sla.solve(S.T, y2) # solve the dense Schur system using scipy.linalg
return self.mumps_ls.expand_schur(x2)
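# --- Illustrative sketch (not part of the original module) -----------------
# Example configuration entry for the Schur complement solver above; the
# variable name 'p' is a hypothetical field of the problem.
_example_schur_mumps_conf = {
    'ls': ('ls.schur_mumps', {'schur_variables': ['p']}),
}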
class MultiProblem(ScipyDirect):
r"""
    Conjugate multiple problems.
    Allows one to define multiple conjugate problems.
"""
name = 'ls.cm_pb'
_parameters = ScipyDirect._parameters + [
('others', 'list', None, True,
'The list of auxiliary problem definition files.'),
('coupling_variables', 'list', None, True,
'The list of coupling variables.'),
]
def __init__(self, conf, context=None, **kwargs):
ScipyDirect.__init__(self, conf, context=context, **kwargs)
def init_subproblems(self, conf, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
# init subproblems
problem = self.context
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in six.itervalues(self.adi_indx):
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
| output.set_output_prefix(sub_prefix) | sfepy.base.base.output.set_output_prefix |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
            raise ValueError('unknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
solver name, that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
M=precond, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PETScKrylovSolver(LinearSolver):
"""
PETSc Krylov subspace solver.
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScKrylovSolver.__init__()`) and allows passing in
PETSc matrices and vectors. Returns a (global) PETSc solution vector
instead of a (local) numpy array, when given a PETSc right-hand side
vector.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overridden when called by passing a `conf`
object.
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return an
object with `setUp(self, pc)` and `apply(self, pc, x, y)` methods.
Has precedence over the `precond`/`sub_precond` parameters.
"""),
('precond', 'str', 'icc', False,
'The preconditioner.'),
('sub_precond', 'str', 'none', False,
'The preconditioner for matrix blocks (in parallel runs).'),
('precond_side', "{'left', 'right', 'symmetric', None}", None, False,
'The preconditioner side.'),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('eps_d', 'float', 1e5, False,
'The divergence tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the KSP solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Can be used to pass
all PETSc options supported by :func:`petsc.Options()`."""),
]
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
def __init__(self, conf, comm=None, context=None, **kwargs):
if comm is None:
from sfepy.parallel.parallel import init_petsc_args; init_petsc_args
from petsc4py import PETSc as petsc
converged_reasons = {}
for key, val in six.iteritems(petsc.KSP.ConvergedReason.__dict__):
if isinstance(val, int):
converged_reasons[val] = key
LinearSolver.__init__(self, conf, petsc=petsc, comm=comm,
converged_reasons=converged_reasons,
fields=None, ksp=None, pmtx=None,
context=context, **kwargs)
def set_field_split(self, field_ranges, comm=None):
"""
Setup local PETSc ranges for fields to be used with 'fieldsplit'
preconditioner.
This function must be called before solving the linear system.
"""
comm = get_default(comm, self.comm)
self.fields = []
for key, rng in six.iteritems(field_ranges):
if isinstance(rng, slice):
rng = rng.start, rng.stop
size = rng[1] - rng[0]
field_is = self.petsc.IS().createStride(size, first=rng[0], step=1,
comm=comm)
self.fields.append((key, field_is))
def create_ksp(self, options=None, comm=None):
optDB = self.petsc.Options()
optDB['sub_pc_type'] = self.conf.sub_precond
if options is not None:
for key, val in six.iteritems(options):
optDB[key] = val
ksp = self.petsc.KSP()
ksp.create(comm)
ksp.setType(self.conf.method)
pc = ksp.getPC()
if self.conf.setup_precond is None:
pc.setType(self.conf.precond)
else:
pc.setType(pc.Type.PYTHON)
ksp.setFromOptions()
if (pc.type == 'fieldsplit'):
if self.fields is not None:
pc.setFieldSplitIS(*self.fields)
else:
msg = 'PETScKrylovSolver.set_field_split() has to be called!'
raise ValueError(msg)
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
return ksp
def create_petsc_matrix(self, mtx, comm=None):
if isinstance(mtx, self.petsc.Mat):
pmtx = mtx
else:
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat()
pmtx.createAIJ(mtx.shape, csr=(mtx.indptr, mtx.indices, mtx.data),
comm=comm)
return pmtx
@petsc_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None, context=None,
**kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if (not is_new) and self.ksp is not None:
ksp = self.ksp
pmtx = self.pmtx
else:
pmtx = self.create_petsc_matrix(mtx, comm=comm)
ksp = self.create_ksp(options=solver_kwargs, comm=comm)
ksp.setOperators(pmtx)
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d,
max_it=i_max)
setup_precond = self.conf.setup_precond
if setup_precond is not None:
ksp.pc.setPythonContext(setup_precond(mtx, context))
ksp.setFromOptions()
self.mtx_digest = mtx_digest
self.ksp = ksp
self.pmtx = pmtx
if isinstance(rhs, self.petsc.Vec):
prhs = rhs
else:
prhs = pmtx.getVecLeft()
prhs[...] = rhs
if x0 is not None:
if isinstance(x0, self.petsc.Vec):
psol = x0
else:
psol = pmtx.getVecRight()
psol[...] = x0
ksp.setInitialGuessNonzero(True)
else:
psol = pmtx.getVecRight()
ksp.setInitialGuessNonzero(False)
ksp.solve(prhs, psol)
output('%s(%s, %s/proc) convergence: %s (%s, %d iterations)'
% (ksp.getType(), ksp.getPC().getType(), self.conf.sub_precond,
ksp.reason, self.converged_reasons[ksp.reason],
ksp.getIterationNumber()),
verbose=conf.verbose)
if isinstance(rhs, self.petsc.Vec):
sol = psol
else:
sol = psol[...].copy()
return sol
class MUMPSSolver(LinearSolver):
"""
Interface to MUMPS solver.
"""
name = 'ls.mumps'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import sfepy.solvers.ls_mumps as mumps
self.mumps_ls = None
if not mumps.use_mpi:
raise AttributeError('No mpi4py found! Required by MUMPS solver.')
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if not self.mumps_presolved:
self.presolve(mtx, presolve_flag=conf.use_presolve)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
self.mumps_ls(3) # solve
return out
def presolve(self, mtx, presolve_flag=False):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
if self.mumps_ls is None:
system = 'complex' if mtx.dtype.name.startswith('complex')\
else 'real'
is_sym = self.mumps.coo_is_symmetric(mtx)
mem_relax = self.conf.memory_relaxation
self.mumps_ls = self.mumps.MumpsSolver(system=system,
is_sym=is_sym,
mem_relax=mem_relax)
if is_new:
if self.conf.verbose:
self.mumps_ls.set_verbose()
self.mumps_ls.set_mtx_centralized(mtx)
self.mumps_ls(4) # analyze + factorize
if presolve_flag:
self.mumps_presolved = True
self.mtx_digest = mtx_digest
def __del__(self):
if self.mumps_ls is not None:
del(self.mumps_ls)
class MUMPSParallelSolver(LinearSolver):
"""
Interface to MUMPS parallel solver.
"""
name = 'ls.mumps_par'
_parameters = [
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import multiprocessing
import sfepy.solvers.ls_mumps as mumps
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
number_of_cpu=multiprocessing.cpu_count(),
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
from mpi4py import MPI
import sys
from sfepy import data_dir
import os.path as op
from tempfile import gettempdir
def tmpfile(fname):
return op.join(gettempdir(), fname)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
is_sym = self.mumps.coo_is_symmetric(mtx)
rr, cc, data = mtx.row + 1, mtx.col + 1, mtx.data
if is_sym:
idxs = nm.where(cc >= rr)[0] # upper triangular matrix
rr, cc, data = rr[idxs], cc[idxs], data[idxs]
n = mtx.shape[0]
nz = rr.shape[0]
flags = nm.memmap(tmpfile('vals_flags.array'), dtype='int32',
mode='w+', shape=(4,))
flags[0] = n
flags[1] = 1 if data.dtype.name.startswith('complex') else 0
flags[2] = int(is_sym)
flags[3] = int(self.conf.verbose)
idxs = nm.memmap(tmpfile('idxs.array'), dtype='int32',
mode='w+', shape=(2, nz))
idxs[0, :] = rr
idxs[1, :] = cc
dtype = {0: 'float64', 1: 'complex128'}[flags[1]]
vals_mtx = nm.memmap(tmpfile('vals_mtx.array'), dtype=dtype,
mode='w+', shape=(nz,))
vals_rhs = nm.memmap(tmpfile('vals_rhs.array'), dtype=dtype,
mode='w+', shape=(n,))
vals_mtx[:] = data
vals_rhs[:] = rhs
mumps_call = op.join(data_dir, 'sfepy', 'solvers',
'ls_mumps_parallel.py')
comm = MPI.COMM_SELF.Spawn(sys.executable, args=[mumps_call],
maxprocs=self.number_of_cpu)
comm.Disconnect()
out = nm.memmap(tmpfile('vals_x.array'), dtype=dtype, mode='r')
return out
class SchurMumps(MUMPSSolver):
r"""
Mumps Schur complement solver.
"""
name = 'ls.schur_mumps'
_parameters = MUMPSSolver._parameters + [
('schur_variables', 'list', None, True,
'The list of Schur variables.'),
]
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import scipy.linalg as sla
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
system = 'complex' if mtx.dtype.name.startswith('complex') else 'real'
self.mumps_ls = self.mumps.MumpsSolver(system=system)
if self.conf.verbose:
self.mumps_ls.set_verbose()
schur_list = []
for schur_var in conf.schur_variables:
slc = self.context.equations.variables.adi.indx[schur_var]
schur_list.append(nm.arange(slc.start, slc.stop, slc.step, dtype='i'))
self.mumps_ls.set_mtx_centralized(mtx)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
S, y2 = self.mumps_ls.get_schur(nm.hstack(schur_list))
x2 = sla.solve(S.T, y2) # solve the dense Schur system using scipy.linalg
return self.mumps_ls.expand_schur(x2)
class MultiProblem(ScipyDirect):
r"""
    Conjugate multiple problems.
    Allows one to define multiple conjugate problems.
"""
name = 'ls.cm_pb'
_parameters = ScipyDirect._parameters + [
('others', 'list', None, True,
'The list of auxiliary problem definition files.'),
('coupling_variables', 'list', None, True,
'The list of coupling variables.'),
]
def __init__(self, conf, context=None, **kwargs):
ScipyDirect.__init__(self, conf, context=context, **kwargs)
def init_subproblems(self, conf, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
# init subproblems
problem = self.context
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in six.itervalues(self.adi_indx):
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
output.set_output_prefix(sub_prefix)
kwargs['master_problem'] = problem
confi = ProblemConf.from_file(ifname, required, other,
define_args=kwargs)
pbi = | Problem.from_conf(confi, init_equations=True) | sfepy.discrete.Problem.from_conf |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
            raise ValueError('unknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of 'smoothed_aggregation_solver' or
'ruge_stuben_solver'. The `accel` parameter specifies the name of the
Krylov solver used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector; for the
gmres accelerator, the argument is the residual norm instead.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
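# A hedged configuration sketch (editorial; option values illustrative)
# showing the 'method:' and 'solve:' prefixes described in the parameter
# list above: prefixed keys are stripped and forwarded to the construction
# function and to mg.solve(), respectively.
#
#     solvers = {
#         'ls': ('ls.pyamg', {
#             'method': 'smoothed_aggregation_solver',
#             'accel': 'cg',
#             'eps_r': 1e-12,
#             'method:max_levels': 5,
#             'solve:cycle': 'V',
#         }),
#     }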
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix and context is a user-supplied context; it should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector; for the
gmres method, the argument is the residual norm instead.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
M=precond, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PETScKrylovSolver(LinearSolver):
"""
PETSc Krylov subspace solver.
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScKrylovSolver.__init__()`) and allows passing in
PETSc matrices and vectors. Returns a (global) PETSc solution vector
instead of a (local) numpy array, when given a PETSc right-hand side
vector.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overridden when called by passing a `conf`
object.
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of the *preconditioned*
residual.
"""
name = 'ls.petsc'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix and context is a user-supplied context; it should return an
object with `setUp(self, pc)` and `apply(self, pc, x, y)` methods.
Has precedence over the `precond`/`sub_precond` parameters.
"""),
('precond', 'str', 'icc', False,
'The preconditioner.'),
('sub_precond', 'str', 'none', False,
'The preconditioner for matrix blocks (in parallel runs).'),
('precond_side', "{'left', 'right', 'symmetric', None}", None, False,
'The preconditioner side.'),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('eps_d', 'float', 1e5, False,
'The divergence tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the KSP solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Can be used to pass
all PETSc options supported by :func:`petsc.Options()`."""),
]
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
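# A hedged sketch (editorial) of an object that `setup_precond` could
# return: petsc4py expects setUp(pc) and apply(pc, x, y) methods. The
# class name and the diagonal (Jacobi) scaling are illustrative only.
#
#     class DiagonalPC(object):
#         def __init__(self, mtx, context):
#             self.idiag = 1.0 / mtx.diagonal()
#         def setUp(self, pc):
#             pass
#         def apply(self, pc, x, y):
#             # x, y are PETSc Vec objects.
#             y.setArray(x.getArray() * self.idiag)
#
# It would be plugged in via
# 'setup_precond': lambda mtx, context: DiagonalPC(mtx, context)
# in the solver options, next to 'method', 'precond', 'eps_r', etc.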
def __init__(self, conf, comm=None, context=None, **kwargs):
if comm is None:
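# Editorial note: the import below is kept for its side effects when
# sfepy.parallel.parallel is loaded; the trailing bare name only
# references init_petsc_args (it is not called here) and marks the
# import as used.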
from sfepy.parallel.parallel import init_petsc_args; init_petsc_args
from petsc4py import PETSc as petsc
converged_reasons = {}
for key, val in six.iteritems(petsc.KSP.ConvergedReason.__dict__):
if isinstance(val, int):
converged_reasons[val] = key
LinearSolver.__init__(self, conf, petsc=petsc, comm=comm,
converged_reasons=converged_reasons,
fields=None, ksp=None, pmtx=None,
context=context, **kwargs)
def set_field_split(self, field_ranges, comm=None):
"""
Set up local PETSc ranges for fields to be used with the 'fieldsplit'
preconditioner.
This function must be called before solving the linear system.
"""
comm = get_default(comm, self.comm)
self.fields = []
for key, rng in six.iteritems(field_ranges):
if isinstance(rng, slice):
rng = rng.start, rng.stop
size = rng[1] - rng[0]
field_is = self.petsc.IS().createStride(size, first=rng[0], step=1,
comm=comm)
self.fields.append((key, field_is))
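# A hedged usage sketch (editorial; variable names and DOF ranges are
# illustrative): with a 'fieldsplit' preconditioner the per-field ranges
# must be registered before the first solve.
#
#     ls = PETScKrylovSolver(conf)
#     ls.set_field_split({'u': slice(0, 1000), 'p': slice(1000, 1200)})
#     sol = ls(rhs, mtx=mtx)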
def create_ksp(self, options=None, comm=None):
optDB = self.petsc.Options()
optDB['sub_pc_type'] = self.conf.sub_precond
if options is not None:
for key, val in six.iteritems(options):
optDB[key] = val
ksp = self.petsc.KSP()
ksp.create(comm)
ksp.setType(self.conf.method)
pc = ksp.getPC()
if self.conf.setup_precond is None:
pc.setType(self.conf.precond)
else:
pc.setType(pc.Type.PYTHON)
ksp.setFromOptions()
if (pc.type == 'fieldsplit'):
if self.fields is not None:
pc.setFieldSplitIS(*self.fields)
else:
msg = 'PETScKrylovSolver.set_field_split() has to be called!'
raise ValueError(msg)
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
return ksp
def create_petsc_matrix(self, mtx, comm=None):
if isinstance(mtx, self.petsc.Mat):
pmtx = mtx
else:
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat()
pmtx.createAIJ(mtx.shape, csr=(mtx.indptr, mtx.indices, mtx.data),
comm=comm)
return pmtx
@petsc_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None, context=None,
**kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if (not is_new) and self.ksp is not None:
ksp = self.ksp
pmtx = self.pmtx
else:
pmtx = self.create_petsc_matrix(mtx, comm=comm)
ksp = self.create_ksp(options=solver_kwargs, comm=comm)
ksp.setOperators(pmtx)
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d,
max_it=i_max)
setup_precond = self.conf.setup_precond
if setup_precond is not None:
ksp.pc.setPythonContext(setup_precond(mtx, context))
ksp.setFromOptions()
self.mtx_digest = mtx_digest
self.ksp = ksp
self.pmtx = pmtx
if isinstance(rhs, self.petsc.Vec):
prhs = rhs
else:
prhs = pmtx.getVecLeft()
prhs[...] = rhs
if x0 is not None:
if isinstance(x0, self.petsc.Vec):
psol = x0
else:
psol = pmtx.getVecRight()
psol[...] = x0
ksp.setInitialGuessNonzero(True)
else:
psol = pmtx.getVecRight()
ksp.setInitialGuessNonzero(False)
ksp.solve(prhs, psol)
output('%s(%s, %s/proc) convergence: %s (%s, %d iterations)'
% (ksp.getType(), ksp.getPC().getType(), self.conf.sub_precond,
ksp.reason, self.converged_reasons[ksp.reason],
ksp.getIterationNumber()),
verbose=conf.verbose)
if isinstance(rhs, self.petsc.Vec):
sol = psol
else:
sol = psol[...].copy()
return sol
class MUMPSSolver(LinearSolver):
"""
Interface to MUMPS solver.
"""
name = 'ls.mumps'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import sfepy.solvers.ls_mumps as mumps
self.mumps_ls = None
if not mumps.use_mpi:
raise AttributeError('No mpi4py found! Required by MUMPS solver.')
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if not self.mumps_presolved:
self.presolve(mtx, presolve_flag=conf.use_presolve)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
self.mumps_ls(3) # solve
return out
def presolve(self, mtx, presolve_flag=False):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
if self.mumps_ls is None:
system = 'complex' if mtx.dtype.name.startswith('complex')\
else 'real'
is_sym = self.mumps.coo_is_symmetric(mtx)
mem_relax = self.conf.memory_relaxation
self.mumps_ls = self.mumps.MumpsSolver(system=system,
is_sym=is_sym,
mem_relax=mem_relax)
if is_new:
if self.conf.verbose:
self.mumps_ls.set_verbose()
self.mumps_ls.set_mtx_centralized(mtx)
self.mumps_ls(4) # analyze + factorize
if presolve_flag:
self.mumps_presolved = True
self.mtx_digest = mtx_digest
def __del__(self):
if self.mumps_ls is not None:
del self.mumps_ls
class MUMPSParallelSolver(LinearSolver):
"""
Interface to MUMPS parallel solver.
"""
name = 'ls.mumps_par'
_parameters = [
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import multiprocessing
import sfepy.solvers.ls_mumps as mumps
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
number_of_cpu=multiprocessing.cpu_count(),
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
from mpi4py import MPI
import sys
from sfepy import data_dir
import os.path as op
from tempfile import gettempdir
def tmpfile(fname):
return op.join(gettempdir(), fname)
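# The parallel run exchanges data with the spawned workers through
# memory-mapped files in the temporary directory: the flags, the COO
# indices, the matrix values and the right-hand side are written below,
# ls_mumps_parallel.py is spawned via MPI, and the solution is read back
# from 'vals_x.array'.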
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
is_sym = self.mumps.coo_is_symmetric(mtx)
rr, cc, data = mtx.row + 1, mtx.col + 1, mtx.data
if is_sym:
idxs = nm.where(cc >= rr)[0] # upper triangular matrix
rr, cc, data = rr[idxs], cc[idxs], data[idxs]
n = mtx.shape[0]
nz = rr.shape[0]
flags = nm.memmap(tmpfile('vals_flags.array'), dtype='int32',
mode='w+', shape=(4,))
flags[0] = n
flags[1] = 1 if data.dtype.name.startswith('complex') else 0
flags[2] = int(is_sym)
flags[3] = int(self.conf.verbose)
idxs = nm.memmap(tmpfile('idxs.array'), dtype='int32',
mode='w+', shape=(2, nz))
idxs[0, :] = rr
idxs[1, :] = cc
dtype = {0: 'float64', 1: 'complex128'}[flags[1]]
vals_mtx = nm.memmap(tmpfile('vals_mtx.array'), dtype=dtype,
mode='w+', shape=(nz,))
vals_rhs = nm.memmap(tmpfile('vals_rhs.array'), dtype=dtype,
mode='w+', shape=(n,))
vals_mtx[:] = data
vals_rhs[:] = rhs
mumps_call = op.join(data_dir, 'sfepy', 'solvers',
'ls_mumps_parallel.py')
comm = MPI.COMM_SELF.Spawn(sys.executable, args=[mumps_call],
maxprocs=self.number_of_cpu)
comm.Disconnect()
out = nm.memmap(tmpfile('vals_x.array'), dtype=dtype, mode='r')
return out
class SchurMumps(MUMPSSolver):
r"""
Mumps Schur complement solver.
"""
name = 'ls.schur_mumps'
_parameters = MUMPSSolver._parameters + [
('schur_variables', 'list', None, True,
'The list of Schur variables.'),
]
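# A hedged configuration sketch (editorial; the variable name 'p' is
# illustrative): the Schur complement is built for the DOFs of the listed
# variables and the reduced dense system is solved with scipy.linalg.
#
#     solvers = {
#         'ls': ('ls.schur_mumps', {'schur_variables': ['p']}),
#     }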
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import scipy.linalg as sla
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
system = 'complex' if mtx.dtype.name.startswith('complex') else 'real'
self.mumps_ls = self.mumps.MumpsSolver(system=system)
if self.conf.verbose:
self.mumps_ls.set_verbose()
schur_list = []
for schur_var in conf.schur_variables:
slc = self.context.equations.variables.adi.indx[schur_var]
schur_list.append(nm.arange(slc.start, slc.stop, slc.step, dtype='i'))
self.mumps_ls.set_mtx_centralized(mtx)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
S, y2 = self.mumps_ls.get_schur(nm.hstack(schur_list))
x2 = sla.solve(S.T, y2) # solve the dense Schur system using scipy.linalg
return self.mumps_ls.expand_schur(x2)
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
Allows defining multiple conjugate (coupled) problems.
"""
name = 'ls.cm_pb'
_parameters = ScipyDirect._parameters + [
('others', 'list', None, True,
'The list of auxiliary problem definition files.'),
('coupling_variables', 'list', None, True,
'The list of coupling variables.'),
]
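# A hedged configuration sketch (editorial; file and variable names are
# illustrative): each file in 'others' defines an auxiliary problem and
# 'coupling_variables' lists the variables shared between the problems.
#
#     solvers = {
#         'ls': ('ls.cm_pb', {
#             'others': ['other_problem.py'],
#             'coupling_variables': ['q'],
#         }),
#     }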
def __init__(self, conf, context=None, **kwargs):
ScipyDirect.__init__(self, conf, context=context, **kwargs)
def init_subproblems(self, conf, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
# init subproblems
problem = self.context
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in six.itervalues(self.adi_indx):
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
output.set_output_prefix(sub_prefix)
kwargs['master_problem'] = problem
confi = ProblemConf.from_file(ifname, required, other,
define_args=kwargs)
pbi = Problem.from_conf(confi, init_equations=True)
sti = | State(pbi.equations.variables) | sfepy.discrete.state.State |
pbi.equations.set_data(None, ignore_unknown=True)
pbi.time_update()
pbi.update_materials()
sti.apply_ebc()
pbi_vars = pbi.get_variables()
| output.set_output_prefix(master_prefix) | sfepy.base.base.output.set_output_prefix |
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
raise ValueError('uknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
| output('scipy solver %s does not exist!' % self.conf.method) | sfepy.base.base.output |
            output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
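        # Wrap the user callback so that the iterations can be counted and,
        # optionally, the residual norm reported.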
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
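        # scipy's qmr expects separate left/right preconditioners (M1, M2);
        # the other methods take a single M.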
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
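            # Assumption: the TypeError comes from an older SciPy whose
            # iterative solvers do not accept the `atol` keyword, so retry
            # with the tol-only signature.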
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
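# --- Usage sketch (illustrative addition, not part of the original API) -----
# A minimal example of configuring ``ls.scipy_iterative`` in the usual
# declarative sfepy style; the keys mirror the ``_parameters`` of
# ``ScipyIterative`` above. The preconditioner hook only demonstrates the
# ``setup_precond(mtx, context)`` signature - the helper name and the config
# values below are hypothetical.
def _demo_jacobi_precond(mtx, context):
    """Sketch of a Jacobi (diagonal) preconditioner returned as a
    ``LinearOperator``."""
    from scipy.sparse.linalg import LinearOperator
    diag = mtx.diagonal()
    return LinearOperator(mtx.shape, matvec=lambda x: x / diag)
_demo_solvers = {
    'ls_iter': ('ls.scipy_iterative', {
        'method': 'cg',
        'i_max': 200,
        'eps_a': 1e-12,
        'eps_r': 1e-10,
        'setup_precond': _demo_jacobi_precond,
    }),
}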
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
    'ruge_stuben_solver'. The `accel` parameter specifies the name of the
    Krylov solver that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
    # All iterative solvers in pyamg.krylov pass the solution vector to the
    # callback, except those listed below, which receive the residual norm
    # instead.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
            output('pyamg.%s does not exist!' % self.conf.method)
            output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
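            # Keys prefixed with 'method:' are stripped of the prefix and
            # forwarded to the MG construction function.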
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
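        # Keys prefixed with 'solve:' are forwarded to the mg.solve() call.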
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
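# --- Usage sketch (illustrative addition, not part of the original API) -----
# A hypothetical declarative configuration showing the 'method:' and 'solve:'
# prefixes described in the ``_parameters`` of ``PyAMGSolver`` above:
# 'method:max_levels' is passed to pyamg.smoothed_aggregation_solver(), while
# 'solve:cycle' is passed to the subsequent mg.solve() call. The values are
# examples only.
_demo_pyamg_solvers = {
    'ls_amg': ('ls.pyamg', {
        'method': 'smoothed_aggregation_solver',
        'accel': 'cg',
        'i_max': 100,
        'eps_r': 1e-10,
        'method:max_levels': 5,
        'solve:cycle': 'V',
    }),
}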
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
    # All iterative solvers in pyamg.krylov pass the solution vector to the
    # callback, except those listed below, which receive the residual norm
    # instead.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
            output('pyamg.krylov.%s does not exist!' % self.conf.method)
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
        ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = | Mesh.from_file(filename) | sfepy.discrete.fem.Mesh.from_file |
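# A condensed sketch of the write/read round trip exercised above: write a
# mesh with io='auto' (format inferred from the file suffix) and read it back
# with Mesh.from_file. The output path is hypothetical; both calls are used
# verbatim by the test.
import os.path as op
from sfepy import data_dir
from sfepy.discrete.fem import Mesh

mesh0 = Mesh.from_file(data_dir + '/meshes/various_formats/small3d.mesh')
out_name = op.join('/tmp', 'test_mesh_wr.vtk')  # hypothetical output location
mesh0.write(out_name, io='auto')                # format picked from '.vtk'
mesh1 = Mesh.from_file(out_name)                # read the written mesh back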
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': | Uncached(mesh0) | sfepy.base.ioutils.Uncached |
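# A hedged sketch of the IGA block-domain setup used in test_hdf5_meshio,
# isolated from the test body. The arguments mirror the test exactly
# (dims, shape, centre, degrees, Greville control points); nothing beyond
# that is assumed.
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain

nurbs, bmesh, regions = gen_patch_block_domain([5, 5, 5],  # dims
                                               [4, 4, 4],  # shape
                                               [0, 0, 0],  # centre
                                               [2, 2, 2],  # degrees
                                               cp_mode='greville',
                                               name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)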
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': | SoftLink('/step0/__cdata/data/data/mesh2') | sfepy.base.ioutils.SoftLink |
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
'mesh5': | DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data') | sfepy.base.ioutils.DataSoftLink |
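# A hedged illustration of the cache-control wrappers assembled into the data
# dict above. The reading here is an assumption based on the wrapper names:
# repeated plain objects appear to be stored once and referenced, Uncached
# appears to force an independent copy, and SoftLink/DataSoftLink point at
# paths inside the written HDF5 file (the '/step0/__cdata/...' strings mirror
# how the test addresses step-0 data). Only names imported by the test are
# used.
from sfepy import data_dir
from sfepy.base.ioutils import Uncached, SoftLink, DataSoftLink
from sfepy.discrete.fem import Mesh

mesh0 = Mesh.from_file(data_dir + '/meshes/various_formats/small3d.mesh')
data = {
    'mesh2': mesh0,                       # plain entry, presumably cached
    'mesh3': Uncached(mesh0),             # presumably stored as its own copy
    'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
    'mesh5': DataSoftLink('Mesh', '/step0/__cdata/data/data/mesh1/data'),
}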
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
'mesh5': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data'),
'mesh6': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh2/data',
mesh0),
'mesh7': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data',
True),
'iga' : ig_domain,
'cached1': | Cached(1) | sfepy.base.ioutils.Cached |
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
'mesh5': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data'),
'mesh6': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh2/data',
mesh0),
'mesh7': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data',
True),
'iga' : ig_domain,
'cached1': Cached(1),
'cached2': | Cached(int_ar) | sfepy.base.ioutils.Cached |
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
'mesh5': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data'),
'mesh6': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh2/data',
mesh0),
'mesh7': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data',
True),
'iga' : ig_domain,
'cached1': Cached(1),
'cached2': Cached(int_ar),
'cached3': | Cached(int_ar) | sfepy.base.ioutils.Cached |
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
'mesh5': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data'),
'mesh6': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh2/data',
mesh0),
'mesh7': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data',
True),
'iga' : ig_domain,
'cached1': Cached(1),
'cached2': Cached(int_ar),
'cached3': Cached(int_ar),
'types': ( True, False, None ),
'tuple': ('first string', 'druhý UTF8 řetězec'),
'struct': Struct(
double=nm.arange(4, dtype=float),
int=nm.array([2,3,4,7]),
sparse=sps.csr_matrix(nm.array([1,0,0,5]).
reshape((2,2)))
)
}
with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as fil:
io = | HDF5MeshIO(fil.name) | sfepy.discrete.fem.meshio.HDF5MeshIO |
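# A minimal sketch of the HDF5 I/O setup step shown above: create a named
# temporary .h5 file and bind an HDF5MeshIO instance to its path.
# delete=False keeps the file alive outside the 'with' block, as in the test;
# the explicit os.remove() cleanup here is an addition, not part of the test.
import os
import tempfile
from sfepy.discrete.fem.meshio import HDF5MeshIO

with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as fil:
    io = HDF5MeshIO(fil.name)
    # ... write and read data through `io` here ...
os.remove(fil.name)  # clean up the temporary file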
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
'mesh5': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data'),
'mesh6': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh2/data',
mesh0),
'mesh7': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data',
True),
'iga' : ig_domain,
'cached1': Cached(1),
'cached2': Cached(int_ar),
'cached3': Cached(int_ar),
'types': ( True, False, None ),
'tuple': ('first string', 'druhý UTF8 řetězec'),
'struct': Struct(
double=nm.arange(4, dtype=float),
int=nm.array([2,3,4,7]),
sparse=sps.csr_matrix(nm.array([1,0,0,5]).
reshape((2,2)))
)
}
with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as fil:
io = HDF5MeshIO(fil.name)
ts = | TimeStepper(0,1.,0.1, 10) | sfepy.solvers.ts.TimeStepper |
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
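# Note: both entries added above are read back in test_read_meshes() via
# Mesh.from_file(), which also accepts a MeshIO instance in place of a file
# name, e.g. (illustrative): mesh = Mesh.from_file(UserMeshIO(mesh_hook)).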
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
            oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
            self.report('hdf5_meshio not tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
'mesh5': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data'),
'mesh6': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh2/data',
mesh0),
'mesh7': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data',
True),
'iga' : ig_domain,
'cached1': Cached(1),
'cached2': Cached(int_ar),
'cached3': Cached(int_ar),
'types': ( True, False, None ),
'tuple': ('first string', 'druhý UTF8 řetězec'),
'struct': Struct(
double=nm.arange(4, dtype=float),
int=nm.array([2,3,4,7]),
sparse=sps.csr_matrix(nm.array([1,0,0,5]).
reshape((2,2)))
)
}
with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as fil:
io = HDF5MeshIO(fil.name)
ts = TimeStepper(0,1.,0.1, 10)
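            # TimeStepper(t0, t1, dt, n_step): here ten steps of 0.1 covering
            # t in [0, 1]; the current ts.step selects the '/step<n>' group
            # that the custom data below is written into.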
io.write(fil.name, mesh0, {
'cdata' : Struct(
mode='custom',
data=data,
unpack_markers=False
)
}, ts=ts)
ts.advance()
mesh = io.read()
data['problem_mesh'] = | DataSoftLink('Mesh', '/mesh', mesh) | sfepy.base.ioutils.DataSoftLink |
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
            oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
            self.report('hdf5_meshio not tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
'mesh5': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data'),
'mesh6': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh2/data',
mesh0),
'mesh7': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data',
True),
'iga' : ig_domain,
'cached1': Cached(1),
'cached2': Cached(int_ar),
'cached3': Cached(int_ar),
'types': ( True, False, None ),
'tuple': ('first string', 'druhý UTF8 řetězec'),
'struct': Struct(
double=nm.arange(4, dtype=float),
int=nm.array([2,3,4,7]),
sparse=sps.csr_matrix(nm.array([1,0,0,5]).
reshape((2,2)))
)
}
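        # The wrappers above come from sfepy.base.ioutils: Cached values are
        # written once and shared when read back (cf. the cached2/cached3
        # assertion below), Uncached forces an independent copy, and SoftLink/
        # DataSoftLink store HDF5 links to data written elsewhere in the file
        # instead of duplicating it.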
with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as fil:
io = HDF5MeshIO(fil.name)
ts = TimeStepper(0,1.,0.1, 10)
io.write(fil.name, mesh0, {
'cdata' : Struct(
mode='custom',
data=data,
unpack_markers=False
)
}, ts=ts)
ts.advance()
mesh = io.read()
data['problem_mesh'] = DataSoftLink('Mesh', '/mesh', mesh)
io.write(fil.name, mesh0, {
'cdata' : Struct(
mode='custom',
data=data,
unpack_markers=True
)
}, ts=ts)
cache = {'/mesh': mesh }
fout = io.read_data(0, cache=cache)
fout2 = io.read_data(1, cache=cache )
out = fout['cdata']
out2 = fout2['cdata']
assert_(out['mesh7'] is out2['mesh7'],
'These two meshes should be in fact the same object')
assert_(out['mesh6'] is out2['mesh6'],
'These two meshes should be in fact the same object')
assert_(out['mesh5'] is not out2['mesh5'],
                "These two meshes should not be the same object")
assert_(out['mesh1'] is out['mesh2'],
'These two meshes should be in fact the same object')
assert_(out['mesh1'] is out['mesh2'],
'These two meshes should be in fact the same object')
assert_(out['mesh4'] is out['mesh2'],
'These two meshes should be in fact the same object')
assert_(out['mesh5'] is not out['mesh2'],
                "These two meshes should not be the same object")
assert_(out['mesh6'] is out['mesh2'],
'These two meshes should be in fact the same object')
assert_(out['mesh7'] is not out['mesh2'],
                "These two meshes should not be the same object")
assert_(out['mesh3'] is not out['mesh2'],
'These two meshes should be different objects')
assert_(out['cached2'] is out['cached3'],
                'These two arrays should be the same object')
        assert_(out2['problem_mesh'] is mesh,
                'These two meshes should be the same object')
        assert_(all(self._compare_meshes(out['mesh1'], mesh0)),
                'Failed to restore mesh')
        assert_(all(self._compare_meshes(out['mesh3'], mesh0)),
                'Failed to restore mesh')
assert_((out['struct'].sparse == data['struct'].sparse).todense()
.all(), 'Sparse matrix restore failed')
ts.advance()
io.write(fil.name, mesh0, {
'cdata' : Struct(
mode='custom',
data=[
DataSoftLink('Mesh',
'/step0/__cdata/data/data/mesh1/data',
mesh0),
mesh0
]
)
}, ts=ts)
out3 = io.read_data(2)['cdata']
| assert_(out3[0] is out3[1]) | sfepy.base.base.assert_ |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
| TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs) | sfepy.solvers.solvers.TimeSteppingSolver.__init__ |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
| TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs) | sfepy.solvers.solvers.TimeSteppingSolver.__init__ |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = | resolve(sdeps) | sfepy.base.resolve_deps.resolve |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = | invert_dict(vtos) | sfepy.base.base.invert_dict |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
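# For instance (illustrative numbers): with ts.n_step = 11 and
# conf.options.save_steps = 5, nm.linspace(0, 10, 5) gives
# [0., 2.5, 5., 7.5, 10.], so is_save becomes [0, 2, 5, 7, 10] after the
# int32 cast and nm.unique().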
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
| output('residual: %e' % err) | sfepy.base.base.output |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
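    # For ts.step > 0, the update below is a forward Euler step: it solves
    # M u_new = M u_old - dt * r(u_old), with M applied via the mass operator.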
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
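# Worked example (illustrative values): with red=1.0, red_factor=0.25,
# red_max=1e-3 and dt0=0.1, red is reduced to 0.25**5 ~ 9.8e-4 before the
# loop exits, so get_min_dt() returns roughly 9.8e-5.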
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
    The time step dt is reduced if the nonlinear solver did not converge. If it
    converged in less than a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
      less than this number of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
        The adaptivity parameters of the time solver (see the list above).
    problem : Problem instance, optional
        This can be used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
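# A minimal illustrative sketch (parameter values are made up, not sfepy
# defaults) of the adaptivity options consumed by adapt_time_step():
#
#     adt = Struct(red=1.0, red_factor=0.25, red_max=1e-3,
#                  inc_factor=1.25, inc_on_iter=4, inc_wait=3,
#                  dt0=ts.dt, wait=0)
#     if adapt_time_step(ts, nls_status, adt):
#         break  # leave the adaptivity loop for this time step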
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
    The time step dt is reduced if the nonlinear solver did not converge. If it
    converged in less than a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
      less than this number of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
        The adaptivity parameters of the time solver (see the list above).
    problem : Problem instance, optional
        This can be used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = | TimeSteppingSolver.process_conf(conf) | sfepy.solvers.solvers.TimeSteppingSolver.process_conf |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
    The time step dt is reduced if the nonlinear solver did not converge. If it
    converged in less than a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
      less than this number of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
        The adaptivity parameters of the time solver (see the list above).
    problem : Problem instance, optional
        This can be used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
| TimeSteppingSolver.__init__(self, conf, **kwargs) | sfepy.solvers.solvers.TimeSteppingSolver.__init__ |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
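        # Hypothetical example: with sdeps = {'u': ['u'], 'p': ['u', 'p']},
        # resolve() yields sorder = [['u'], ['p']], i.e. the 'u' block is
        # solved first and the 'p' block can then use the already solved 'u'.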
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
    The time step dt is reduced if the nonlinear solver did not converge. If it
    converged in less than a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
      less than this number of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
        The adaptivity parameters of the time solver (see the list above).
    problem : Problem instance, optional
        This can be used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = | TimeStepper.from_conf(self.conf) | sfepy.solvers.ts.TimeStepper.from_conf |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
The time step dt is reduced, if the nonlinear solver did not converge. If it
converged in less then a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
less than this amount of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
The adaptivity parameters of the time solver:
problem : Problem instance, optional
This canbe used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = TimeStepper.from_conf(self.conf)
nd = self.ts.n_digit
format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
self.format = format
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
suffix, is_save = prepare_save_data(ts, problem.conf)
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
output(self.format % (time, step + 1, ts.n_step))
state = self.solve_step(ts, state0, nls_status=nls_status)
state0 = state.copy(deep=True)
if step_hook is not None:
step_hook(problem, ts, state)
if save_results and (is_save[ii] == ts.step):
filename = problem.get_output_name(suffix=suffix % ts.step)
problem.save_state(filename, state,
post_process_hook=post_process_hook,
file_per_var=None,
ts=ts)
ii += 1
problem.advance(ts)
return state
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_implicit_step(ts, state0, self.problem,
nls_status=nls_status)
return state
class ExplicitTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Explicit time stepping solver with a fixed time step.
"""
name = 'ts.explicit'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
The time step dt is reduced, if the nonlinear solver did not converge. If it
converged in less then a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
less than this amount of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
The adaptivity parameters of the time solver:
problem : Problem instance, optional
This canbe used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = TimeStepper.from_conf(self.conf)
nd = self.ts.n_digit
format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
self.format = format
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
suffix, is_save = prepare_save_data(ts, problem.conf)
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
output(self.format % (time, step + 1, ts.n_step))
state = self.solve_step(ts, state0, nls_status=nls_status)
state0 = state.copy(deep=True)
if step_hook is not None:
step_hook(problem, ts, state)
if save_results and (is_save[ii] == ts.step):
filename = problem.get_output_name(suffix=suffix % ts.step)
problem.save_state(filename, state,
post_process_hook=post_process_hook,
file_per_var=None,
ts=ts)
ii += 1
problem.advance(ts)
return state
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_implicit_step(ts, state0, self.problem,
nls_status=nls_status)
return state
class ExplicitTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Explicit time stepping solver with a fixed time step.
"""
name = 'ts.explicit'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
return Struct(mass=get('mass', None,
'missing "mass" in options!'),
lumped=get('lumped', False)) + common
def __init__(self, conf, **kwargs):
SimpleTimeSteppingSolver.__init__(self, conf, **kwargs)
self.mass = | MassOperator(self.problem, self.conf) | sfepy.discrete.mass_operator.MassOperator |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
The time step dt is reduced, if the nonlinear solver did not converge. If it
converged in less then a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
less than this amount of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
The adaptivity parameters of the time solver:
problem : Problem instance, optional
This canbe used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = TimeStepper.from_conf(self.conf)
nd = self.ts.n_digit
format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
self.format = format
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
suffix, is_save = prepare_save_data(ts, problem.conf)
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
output(self.format % (time, step + 1, ts.n_step))
state = self.solve_step(ts, state0, nls_status=nls_status)
state0 = state.copy(deep=True)
if step_hook is not None:
step_hook(problem, ts, state)
if save_results and (is_save[ii] == ts.step):
filename = problem.get_output_name(suffix=suffix % ts.step)
problem.save_state(filename, state,
post_process_hook=post_process_hook,
file_per_var=None,
ts=ts)
ii += 1
problem.advance(ts)
return state
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_implicit_step(ts, state0, self.problem,
nls_status=nls_status)
return state
class ExplicitTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Explicit time stepping solver with a fixed time step.
"""
name = 'ts.explicit'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
return Struct(mass=get('mass', None,
'missing "mass" in options!'),
lumped=get('lumped', False)) + common
def __init__(self, conf, **kwargs):
SimpleTimeSteppingSolver.__init__(self, conf, **kwargs)
self.mass = MassOperator(self.problem, self.conf)
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_explicit_step(ts, state0, self.problem, self.mass,
nls_status=nls_status)
return state
class AdaptiveTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Implicit time stepping solver with an adaptive time step.
Either the built-in or user supplied function can be used to adapt the time
step.
"""
name = 'ts.adaptive'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
The time step dt is reduced, if the nonlinear solver did not converge. If it
converged in less then a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
less than this amount of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
The adaptivity parameters of the time solver:
problem : Problem instance, optional
This canbe used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = TimeStepper.from_conf(self.conf)
nd = self.ts.n_digit
format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
self.format = format
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
suffix, is_save = prepare_save_data(ts, problem.conf)
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
output(self.format % (time, step + 1, ts.n_step))
state = self.solve_step(ts, state0, nls_status=nls_status)
state0 = state.copy(deep=True)
if step_hook is not None:
step_hook(problem, ts, state)
if save_results and (is_save[ii] == ts.step):
filename = problem.get_output_name(suffix=suffix % ts.step)
problem.save_state(filename, state,
post_process_hook=post_process_hook,
file_per_var=None,
ts=ts)
ii += 1
problem.advance(ts)
return state
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_implicit_step(ts, state0, self.problem,
nls_status=nls_status)
return state
class ExplicitTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Explicit time stepping solver with a fixed time step.
"""
name = 'ts.explicit'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
return Struct(mass=get('mass', None,
'missing "mass" in options!'),
lumped=get('lumped', False)) + common
def __init__(self, conf, **kwargs):
SimpleTimeSteppingSolver.__init__(self, conf, **kwargs)
self.mass = MassOperator(self.problem, self.conf)
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_explicit_step(ts, state0, self.problem, self.mass,
nls_status=nls_status)
return state
class AdaptiveTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Implicit time stepping solver with an adaptive time step.
Either the built-in or user supplied function can be used to adapt the time
step.
"""
name = 'ts.adaptive'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
adt = Struct(red_factor=get('dt_red_factor', 0.2),
red_max=get('dt_red_max', 1e-3),
inc_factor=get('dt_inc_factor', 1.25),
inc_on_iter=get('dt_inc_on_iter', 4),
inc_wait=get('dt_inc_wait', 5),
red=1.0, wait=0, dt0=0.0)
return Struct(adapt_fun=get('adapt_fun', adapt_time_step),
adt=adt) + common
def __init__(self, conf, **kwargs):
| TimeSteppingSolver.__init__(self, conf, **kwargs) | sfepy.solvers.solvers.TimeSteppingSolver.__init__ |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
The time step dt is reduced, if the nonlinear solver did not converge. If it
converged in less then a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
less than this amount of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
The adaptivity parameters of the time solver:
problem : Problem instance, optional
This canbe used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = TimeStepper.from_conf(self.conf)
nd = self.ts.n_digit
format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
self.format = format
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
suffix, is_save = prepare_save_data(ts, problem.conf)
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
output(self.format % (time, step + 1, ts.n_step))
state = self.solve_step(ts, state0, nls_status=nls_status)
state0 = state.copy(deep=True)
if step_hook is not None:
step_hook(problem, ts, state)
if save_results and (is_save[ii] == ts.step):
filename = problem.get_output_name(suffix=suffix % ts.step)
problem.save_state(filename, state,
post_process_hook=post_process_hook,
file_per_var=None,
ts=ts)
ii += 1
problem.advance(ts)
return state
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_implicit_step(ts, state0, self.problem,
nls_status=nls_status)
return state
class ExplicitTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Explicit time stepping solver with a fixed time step.
"""
name = 'ts.explicit'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
return Struct(mass=get('mass', None,
'missing "mass" in options!'),
lumped=get('lumped', False)) + common
def __init__(self, conf, **kwargs):
SimpleTimeSteppingSolver.__init__(self, conf, **kwargs)
self.mass = MassOperator(self.problem, self.conf)
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_explicit_step(ts, state0, self.problem, self.mass,
nls_status=nls_status)
return state
class AdaptiveTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Implicit time stepping solver with an adaptive time step.
Either the built-in or user supplied function can be used to adapt the time
step.
"""
name = 'ts.adaptive'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
adt = Struct(red_factor=get('dt_red_factor', 0.2),
red_max=get('dt_red_max', 1e-3),
inc_factor=get('dt_inc_factor', 1.25),
inc_on_iter=get('dt_inc_on_iter', 4),
inc_wait=get('dt_inc_wait', 5),
red=1.0, wait=0, dt0=0.0)
return Struct(adapt_fun=get('adapt_fun', adapt_time_step),
adt=adt) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = | VariableTimeStepper.from_conf(self.conf) | sfepy.solvers.ts.VariableTimeStepper.from_conf |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
    for key, val in deps.items():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
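# Illustrative note (an assumed example, not part of the module): if the
# equations couple a virtual/state pair ('v', 'u') and ('q', 'p') such that
# the 'p' equation depends on 'u' while the 'u' equation is self-contained,
# resolve() yields sorder = [['u'], ['p']], so the loop above first solves
# the 'u' block and then the 'p' block with 'u' already known.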
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
    except AttributeError:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
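# Illustrative sketch (not part of the solver code): with ts.n_step = 11 and
# conf.options.save_steps = 3, the selection above reduces to
#   nm.unique(nm.linspace(0, 10, 3).astype(nm.int32))  # -> array([0, 5, 10])
# i.e. the state is saved at the first, the middle and the last step.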
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
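        # One forward Euler update: solve M u_new = M u_old - dt * r(u_old),
        # i.e. apply the inverse mass operator to the right-hand side below.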
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
    """
    Return the smallest time step allowed by the adaptivity parameters `adt`:
    the current reduction factor is multiplied by `adt.red_factor` until it
    drops below `adt.red_max`.
    """
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
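# For example, starting from adt.red = 1.0 with red_factor = 0.2 and
# red_max = 1e-3, the loop ends with red = 0.2**5 = 3.2e-4 < 1e-3, so for
# dt0 = 0.1 the minimum time step is about 3.2e-5.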
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
    The time step dt is reduced if the nonlinear solver did not converge. If it
    converged in less than a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
    - inc_on_iter : increase the time step if the nonlinear solver converged in
      fewer than this number of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
        The adaptivity parameters of the time solver (see the list above).
problem : Problem instance, optional
        This can be used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
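# Hedged configuration sketch: the adaptivity parameters documented above are
# read by AdaptiveTimeSteppingSolver.process_conf() (defined below) from keys
# prefixed by 'dt_'. The values are illustrative only.
_example_ts_adaptive_conf = {
    'kind' : 'ts.adaptive',
    't0' : 0.0,
    't1' : 1.0,
    'dt' : 0.1,
    'dt_red_factor' : 0.2,   # reduce dt by this factor when the solver fails
    'dt_red_max' : 1e-3,     # stop once the cumulative reduction falls below this
    'dt_inc_factor' : 1.25,  # increase factor after repeated fast convergence
    'dt_inc_on_iter' : 4,    # "fast" means at most this many nonlinear iterations...
    'dt_inc_wait' : 5,       # ...for this many consecutive steps
    # 'adapt_fun' : adapt_time_step,  # or the name of a user-defined function
}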
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = TimeStepper.from_conf(self.conf)
nd = self.ts.n_digit
format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
self.format = format
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
suffix, is_save = prepare_save_data(ts, problem.conf)
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
output(self.format % (time, step + 1, ts.n_step))
state = self.solve_step(ts, state0, nls_status=nls_status)
state0 = state.copy(deep=True)
if step_hook is not None:
step_hook(problem, ts, state)
if save_results and (is_save[ii] == ts.step):
filename = problem.get_output_name(suffix=suffix % ts.step)
problem.save_state(filename, state,
post_process_hook=post_process_hook,
file_per_var=None,
ts=ts)
ii += 1
problem.advance(ts)
return state
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_implicit_step(ts, state0, self.problem,
nls_status=nls_status)
return state
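# Hedged configuration sketch for the fixed-step solver above ('ts.simple');
# the keys mirror SimpleTimeSteppingSolver.process_conf() and the values are
# illustrative. With 'dt' set to None the step is derived from 'n_step' by
# the TimeStepper.
_example_ts_simple_conf = {
    'kind' : 'ts.simple',
    't0' : 0.0,
    't1' : 1.0,
    'dt' : None,
    'n_step' : 11,
    'quasistatic' : False,
}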
class ExplicitTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Explicit time stepping solver with a fixed time step.
"""
name = 'ts.explicit'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
return Struct(mass=get('mass', None,
'missing "mass" in options!'),
lumped=get('lumped', False)) + common
def __init__(self, conf, **kwargs):
SimpleTimeSteppingSolver.__init__(self, conf, **kwargs)
self.mass = MassOperator(self.problem, self.conf)
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_explicit_step(ts, state0, self.problem, self.mass,
nls_status=nls_status)
return state
class AdaptiveTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Implicit time stepping solver with an adaptive time step.
Either the built-in or user supplied function can be used to adapt the time
step.
"""
name = 'ts.adaptive'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
adt = Struct(red_factor=get('dt_red_factor', 0.2),
red_max=get('dt_red_max', 1e-3),
inc_factor=get('dt_inc_factor', 1.25),
inc_on_iter=get('dt_inc_on_iter', 4),
inc_wait=get('dt_inc_wait', 5),
red=1.0, wait=0, dt0=0.0)
return Struct(adapt_fun=get('adapt_fun', adapt_time_step),
adt=adt) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = VariableTimeStepper.from_conf(self.conf)
self.adt = adt = self.conf.adt
adt.dt0 = self.ts.get_default_time_step()
self.ts.set_n_digit_from_min_dt(get_min_dt(adt))
self.format = '====== time %e (dt %e, wait %d, step %d of %d) ====='
if isinstance(self.conf.adapt_fun, basestr):
self.adapt_time_step = self.problem.functions[self.conf.adapt_fun]
else:
self.adapt_time_step = self.conf.adapt_fun
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
output(self.format % (time, ts.dt, self.adt.wait,
step + 1, ts.n_step))
state = self.solve_step(ts, state0, nls_status=nls_status)
state0 = state.copy(deep=True)
if step_hook is not None:
step_hook(problem, ts, state)
if save_results:
filename = problem.get_output_name(suffix=ts.suffix % ts.step)
problem.save_state(filename, state,
post_process_hook=post_process_hook,
file_per_var=None,
ts=ts)
ii += 1
problem.advance(ts)
return state
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
status = | IndexedStruct(n_iter=0, condition=0) | sfepy.base.base.IndexedStruct |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
| output('solving for %s...' % sorder[ib]) | sfepy.base.base.output |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = | get_subdict(parts0, block) | sfepy.base.base.get_subdict |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
| output('...done') | sfepy.base.base.output |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
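# Illustrative check: for ts.n_step = 10 and save_steps = 4 the computation
# above gives nm.unique(nm.linspace(0, 9, 4).astype(nm.int32)), i.e. the
# state is saved at steps [0, 3, 6, 9].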
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
| output('matrix evaluation failed, giving up...') | sfepy.base.base.output |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
| output('residual evaluation failed, giving up...') | sfepy.base.base.output |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
The time step dt is reduced if the nonlinear solver did not converge. If it
converged in less than a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase the time step if the nonlinear solver converged in
fewer than this number of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
The adaptivity parameters of the time solver; see the list above.
problem : Problem instance, optional
This can be used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
| output('----- new time step: %e -----' % ts.dt) | sfepy.base.base.output |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
The time step dt is reduced if the nonlinear solver did not converge. If it
converged in less than a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase the time step if the nonlinear solver converged in
fewer than this number of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
The adaptivity parameters of the time solver; see the list above.
problem : Problem instance, optional
This can be used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = TimeStepper.from_conf(self.conf)
nd = self.ts.n_digit
format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
self.format = format
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
suffix, is_save = prepare_save_data(ts, problem.conf)
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
| output(self.format % (time, step + 1, ts.n_step)) | sfepy.base.base.output |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
| output('initial residual: %e' % err) | sfepy.base.base.output |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
| output('initial residual evaluation failed, giving up...') | sfepy.base.base.output |
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
for key, val in deps.iteritems():
out[pairs[key]] = val
return out
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
except:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
The time step dt is reduced if the nonlinear solver did not converge. If it
converged in less than a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase the time step if the nonlinear solver converged in
fewer than this number of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
The adaptivity parameters of the time solver; see the list above.
problem : Problem instance, optional
This can be used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
| output('+++++ new time step: %e +++++' % ts.dt) | sfepy.base.base.output |
import numpy as nm
from sfepy.base.conf import transform_functions
from sfepy.base.testing import TestCommon
def get_vertices(coors, domain=None):
x, z = coors[:,0], coors[:,2]
return nm.where((z < 0.1) & (x < 0.1))[0]
def get_cells(coors, domain=None):
return nm.where(coors[:, 0] < 0)[0]
class Test(TestCommon):
@staticmethod
def from_conf( conf, options ):
from sfepy import data_dir
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete import Functions
mesh = Mesh('test mesh',
data_dir + '/meshes/various_formats/abaqus_tet.inp')
mesh.nodal_bcs['set0'] = [0, 7]
domain = | FEDomain('test domain', mesh) | sfepy.discrete.fem.FEDomain |
import numpy as nm
from sfepy.base.conf import transform_functions
from sfepy.base.testing import TestCommon
def get_vertices(coors, domain=None):
x, z = coors[:,0], coors[:,2]
return nm.where((z < 0.1) & (x < 0.1))[0]
def get_cells(coors, domain=None):
return nm.where(coors[:, 0] < 0)[0]
class Test(TestCommon):
@staticmethod
def from_conf( conf, options ):
from sfepy import data_dir
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete import Functions
mesh = Mesh('test mesh',
data_dir + '/meshes/various_formats/abaqus_tet.inp')
mesh.nodal_bcs['set0'] = [0, 7]
domain = FEDomain('test domain', mesh)
conf_functions = {
'get_vertices' : (get_vertices,),
'get_cells' : (get_cells,),
}
functions = Functions.from_conf( | transform_functions(conf_functions) | sfepy.base.conf.transform_functions |
# Vibroacoustics
#
# E.Rohan, V.Lukeš
# Homogenization of the vibro–acoustic transmission on periodically
# perforated elastic plates with arrays of resonators.
# https://arxiv.org/abs/2104.01367 (arXiv:2104.01367v1)
import os.path as op
import numpy as nm
from acoustics_macro_utils import get_homogmat
from sfepy.homogenization.utils import define_box_regions
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.periodic import match_y_plane, match_x_plane
wdir = op.dirname(__file__)
def get_regions(filename_mesh):
mesh = | Mesh.from_file(filename_mesh) | sfepy.discrete.fem.Mesh.from_file |
# Vibroacoustics
#
# E.Rohan, V.Lukeš
# Homogenization of the vibro–acoustic transmission on periodically
# perforated elastic plates with arrays of resonators.
# https://arxiv.org/abs/2104.01367 (arXiv:2104.01367v1)
import os.path as op
import numpy as nm
from acoustics_macro_utils import get_homogmat
from sfepy.homogenization.utils import define_box_regions
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.periodic import match_y_plane, match_x_plane
wdir = op.dirname(__file__)
def get_regions(filename_mesh):
mesh = Mesh.from_file(filename_mesh)
bbox = nm.array(mesh.get_bounding_box())
region_lb, region_rt = bbox
return | define_box_regions(2, region_lb, region_rt) | sfepy.homogenization.utils.define_box_regions |
from __future__ import absolute_import
import os
import sfepy
from sfepy.base.base import load_classes, insert_static_method
from .solvers import *
from .eigen import eig
solver_files = | sfepy.get_paths('sfepy/solvers/*.py') | sfepy.get_paths |
from __future__ import absolute_import
import os
import sfepy
from sfepy.base.base import load_classes, insert_static_method
from .solvers import *
from .eigen import eig
solver_files = sfepy.get_paths('sfepy/solvers/*.py')
remove = ['setup.py', 'solvers.py', 'petsc_worker.py']
solver_files = [name for name in solver_files
if os.path.basename(name) not in remove]
solver_table = load_classes(solver_files,
[LinearSolver, NonlinearSolver,
TimeSteppingSolver, EigenvalueSolver,
OptimizationSolver], package_name='sfepy.solvers')
def register_solver(cls):
"""
Register a custom solver.
"""
solver_table[cls.name] = cls
def any_from_conf(conf, **kwargs):
"""Create an instance of a solver class according to the configuration."""
return solver_table[conf.kind](conf, **kwargs)
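# Hedged usage sketch: after this module is imported, solver_table maps
# solver names to classes (e.g. 'ts.simple' from ts_solvers.py, assuming
# that module loads cleanly), and any_from_conf() builds an instance from a
# configuration object exposing a 'kind' attribute, e.g. (hypothetical
# values, the 'problem' keyword is illustrative):
#     conf = Struct(name='ts', kind='ts.simple', t0=0.0, t1=1.0, n_step=11)
#     tss = any_from_conf(conf, problem=problem)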
| insert_static_method(Solver, any_from_conf) | sfepy.base.base.insert_static_method |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = | FEDomain('domain', mesh) | sfepy.discrete.fem.FEDomain |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = | create_mass_matrix(field) | sfepy.discrete.projections.create_mass_matrix |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
| assert_(mtx.shape == (field.n_nod, field.n_nod)) | sfepy.base.base.assert_ |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = | FieldVariable('us', 'unknown', self.field) | sfepy.discrete.FieldVariable |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = FieldVariable('us', 'unknown', self.field)
coors = self.field.get_coor()
vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
source.set_data(vals)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_source.vtk')
source.save_as_mesh(name)
mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
prefix_dir=sfepy.data_dir)
domain = | FEDomain('domain', mesh) | sfepy.discrete.fem.FEDomain |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = FieldVariable('us', 'unknown', self.field)
coors = self.field.get_coor()
vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
source.set_data(vals)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_source.vtk')
source.save_as_mesh(name)
mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('bilinear', nm.float64, 'scalar', omega,
approx_order=1)
target = | FieldVariable('ut', 'unknown', field) | sfepy.discrete.FieldVariable |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = FieldVariable('us', 'unknown', self.field)
coors = self.field.get_coor()
vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
source.set_data(vals)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_source.vtk')
source.save_as_mesh(name)
mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('bilinear', nm.float64, 'scalar', omega,
approx_order=1)
target = FieldVariable('ut', 'unknown', field)
| make_l2_projection(target, source) | sfepy.discrete.projections.make_l2_projection |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = FieldVariable('us', 'unknown', self.field)
coors = self.field.get_coor()
vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
source.set_data(vals)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_source.vtk')
source.save_as_mesh(name)
mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('bilinear', nm.float64, 'scalar', omega,
approx_order=1)
target = FieldVariable('ut', 'unknown', field)
make_l2_projection(target, source)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_target.vtk')
target.save_as_mesh(name)
bbox = self.field.domain.get_mesh_bounding_box()
x = nm.linspace(bbox[0, 0] + 0.001, bbox[1, 0] - 0.001, 20)
y = nm.linspace(bbox[0, 1] + 0.001, bbox[1, 1] - 0.001, 20)
xx, yy = nm.meshgrid(x, y)
test_coors = nm.c_[xx.ravel(), yy.ravel()].copy()
vec1 = source.evaluate_at(test_coors)
vec2 = target.evaluate_at(test_coors)
ok = (nm.abs(vec1 - vec2) < 0.01).all()
return ok
def test_projection_iga_fem(self):
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import FEDomain, Field
from sfepy.discrete.iga.domain import IGDomain
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.discrete.projections import (make_l2_projection,
make_l2_projection_data)
shape = [10, 12, 12]
dims = [5, 6, 6]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = | IGDomain('iga', nurbs, bmesh, regions=regions) | sfepy.discrete.iga.domain.IGDomain |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = FieldVariable('us', 'unknown', self.field)
coors = self.field.get_coor()
vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
source.set_data(vals)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_source.vtk')
source.save_as_mesh(name)
mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('bilinear', nm.float64, 'scalar', omega,
approx_order=1)
target = FieldVariable('ut', 'unknown', field)
make_l2_projection(target, source)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_target.vtk')
target.save_as_mesh(name)
bbox = self.field.domain.get_mesh_bounding_box()
x = nm.linspace(bbox[0, 0] + 0.001, bbox[1, 0] - 0.001, 20)
y = nm.linspace(bbox[0, 1] + 0.001, bbox[1, 1] - 0.001, 20)
xx, yy = nm.meshgrid(x, y)
test_coors = nm.c_[xx.ravel(), yy.ravel()].copy()
vec1 = source.evaluate_at(test_coors)
vec2 = target.evaluate_at(test_coors)
ok = (nm.abs(vec1 - vec2) < 0.01).all()
return ok
def test_projection_iga_fem(self):
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import FEDomain, Field
from sfepy.discrete.iga.domain import IGDomain
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.discrete.projections import (make_l2_projection,
make_l2_projection_data)
shape = [10, 12, 12]
dims = [5, 6, 6]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
ig_omega = ig_domain.create_region('Omega', 'all')
ig_field = Field.from_args('iga', nm.float64, 1, ig_omega,
approx_order='iga', poly_space_base='iga')
ig_u = FieldVariable('ig_u', 'parameter', ig_field,
primary_var_name='(set-to-None)')
mesh = | gen_block_mesh(dims, shape, centre, name='fem') | sfepy.mesh.mesh_generators.gen_block_mesh |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = FieldVariable('us', 'unknown', self.field)
coors = self.field.get_coor()
vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
source.set_data(vals)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_source.vtk')
source.save_as_mesh(name)
mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('bilinear', nm.float64, 'scalar', omega,
approx_order=1)
target = FieldVariable('ut', 'unknown', field)
make_l2_projection(target, source)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_target.vtk')
target.save_as_mesh(name)
bbox = self.field.domain.get_mesh_bounding_box()
x = nm.linspace(bbox[0, 0] + 0.001, bbox[1, 0] - 0.001, 20)
y = nm.linspace(bbox[0, 1] + 0.001, bbox[1, 1] - 0.001, 20)
xx, yy = nm.meshgrid(x, y)
test_coors = nm.c_[xx.ravel(), yy.ravel()].copy()
vec1 = source.evaluate_at(test_coors)
vec2 = target.evaluate_at(test_coors)
ok = (nm.abs(vec1 - vec2) < 0.01).all()
return ok
def test_projection_iga_fem(self):
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import FEDomain, Field
from sfepy.discrete.iga.domain import IGDomain
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.discrete.projections import (make_l2_projection,
make_l2_projection_data)
shape = [10, 12, 12]
dims = [5, 6, 6]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
ig_omega = ig_domain.create_region('Omega', 'all')
ig_field = Field.from_args('iga', nm.float64, 1, ig_omega,
approx_order='iga', poly_space_base='iga')
ig_u = FieldVariable('ig_u', 'parameter', ig_field,
primary_var_name='(set-to-None)')
mesh = gen_block_mesh(dims, shape, centre, name='fem')
fe_domain = | FEDomain('fem', mesh) | sfepy.discrete.fem.FEDomain |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = FieldVariable('us', 'unknown', self.field)
coors = self.field.get_coor()
vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
source.set_data(vals)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_source.vtk')
source.save_as_mesh(name)
mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('bilinear', nm.float64, 'scalar', omega,
approx_order=1)
target = FieldVariable('ut', 'unknown', field)
make_l2_projection(target, source)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_target.vtk')
target.save_as_mesh(name)
bbox = self.field.domain.get_mesh_bounding_box()
x = nm.linspace(bbox[0, 0] + 0.001, bbox[1, 0] - 0.001, 20)
y = nm.linspace(bbox[0, 1] + 0.001, bbox[1, 1] - 0.001, 20)
xx, yy = nm.meshgrid(x, y)
test_coors = nm.c_[xx.ravel(), yy.ravel()].copy()
vec1 = source.evaluate_at(test_coors)
vec2 = target.evaluate_at(test_coors)
ok = (nm.abs(vec1 - vec2) < 0.01).all()
return ok
def test_projection_iga_fem(self):
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import FEDomain, Field
from sfepy.discrete.iga.domain import IGDomain
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.discrete.projections import (make_l2_projection,
make_l2_projection_data)
shape = [10, 12, 12]
dims = [5, 6, 6]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
ig_omega = ig_domain.create_region('Omega', 'all')
ig_field = Field.from_args('iga', nm.float64, 1, ig_omega,
approx_order='iga', poly_space_base='iga')
ig_u = FieldVariable('ig_u', 'parameter', ig_field,
primary_var_name='(set-to-None)')
mesh = gen_block_mesh(dims, shape, centre, name='fem')
fe_domain = FEDomain('fem', mesh)
fe_omega = fe_domain.create_region('Omega', 'all')
fe_field = Field.from_args('fem', nm.float64, 1, fe_omega,
approx_order=2)
fe_u = FieldVariable('fe_u', 'parameter', fe_field,
primary_var_name='(set-to-None)')
def _eval_data(ts, coors, mode, **kwargs):
return nm.prod(coors**2, axis=1)[:, None, None]
| make_l2_projection_data(ig_u, _eval_data) | sfepy.discrete.projections.make_l2_projection_data |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = FieldVariable('us', 'unknown', self.field)
coors = self.field.get_coor()
vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
source.set_data(vals)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_source.vtk')
source.save_as_mesh(name)
mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('bilinear', nm.float64, 'scalar', omega,
approx_order=1)
target = FieldVariable('ut', 'unknown', field)
make_l2_projection(target, source)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_target.vtk')
target.save_as_mesh(name)
bbox = self.field.domain.get_mesh_bounding_box()
x = nm.linspace(bbox[0, 0] + 0.001, bbox[1, 0] - 0.001, 20)
y = nm.linspace(bbox[0, 1] + 0.001, bbox[1, 1] - 0.001, 20)
xx, yy = nm.meshgrid(x, y)
test_coors = nm.c_[xx.ravel(), yy.ravel()].copy()
vec1 = source.evaluate_at(test_coors)
vec2 = target.evaluate_at(test_coors)
ok = (nm.abs(vec1 - vec2) < 0.01).all()
return ok
def test_projection_iga_fem(self):
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import FEDomain, Field
from sfepy.discrete.iga.domain import IGDomain
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.discrete.projections import (make_l2_projection,
make_l2_projection_data)
shape = [10, 12, 12]
dims = [5, 6, 6]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
ig_omega = ig_domain.create_region('Omega', 'all')
ig_field = Field.from_args('iga', nm.float64, 1, ig_omega,
approx_order='iga', poly_space_base='iga')
ig_u = FieldVariable('ig_u', 'parameter', ig_field,
primary_var_name='(set-to-None)')
mesh = gen_block_mesh(dims, shape, centre, name='fem')
fe_domain = FEDomain('fem', mesh)
fe_omega = fe_domain.create_region('Omega', 'all')
fe_field = Field.from_args('fem', nm.float64, 1, fe_omega,
approx_order=2)
fe_u = FieldVariable('fe_u', 'parameter', fe_field,
primary_var_name='(set-to-None)')
def _eval_data(ts, coors, mode, **kwargs):
return nm.prod(coors**2, axis=1)[:, None, None]
make_l2_projection_data(ig_u, _eval_data)
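        # Hedged verification sketch (an addition, not in the original test):
        # once ig_u is projected onto fe_u below, the FEM DOF values could be
        # compared with the analytic data sampled at the FEM node coordinates.
        # The relative tolerance is an assumption.
        def _check_fe_projection():
            fe_coors = fe_field.get_coor()
            exact = nm.prod(fe_coors**2, axis=1)
            err = nm.abs(fe_u() - exact).max() / nm.abs(exact).max()
            return err < 0.01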
| make_l2_projection(fe_u, ig_u) | sfepy.discrete.projections.make_l2_projection |
# Vibroacoustics
#
# E.Rohan, V.Lukeš
# Homogenization of the vibro–acoustic transmission on periodically
# perforated elastic plates with arrays of resonators.
# https://arxiv.org/abs/2104.01367 (arXiv:2104.01367v1)
import os.path as op
import numpy as nm
from collections.abc import Iterable
from scipy.io import savemat, loadmat
from sfepy.base.base import output, debug, Struct
from sfepy import data_dir
from sfepy.discrete.fem.periodic import match_y_plane, match_x_plane
from acoustics_macro_utils import eval_phi, post_process,\
generate_plate_mesh, get_region_entities
from sfepy.discrete.projections import project_by_component
from sfepy.discrete.fem import Mesh, FEDomain
wdir = op.dirname(__file__)
def post_process_macro(out, pb, state, extend=False):
pbvars = pb.get_variables()
n1, ng1, c1, cg1, ds1, nmap1 = get_region_entities(pbvars['p1'])
noff = n1.shape[0]
n2, ng2, c2, cg2, _, nmap2 = get_region_entities(pbvars['p2'], noff=noff)
nend = nm.max(c2) + 1
nmap = nm.hstack([nmap1, nmap2])
n1[:, 2] += pb.conf.eps0 * 0.5
n2[:, 2] -= pb.conf.eps0 * 0.5
mesh2 = Mesh.from_data('m2', nm.vstack([n1, n2]), nm.hstack([ng1, ng2]),
[nm.vstack([c1, c2])], [nm.hstack([cg1, cg2])],
[ds1])
oname = op.join(pb.output_dir, pb.ofn_trunk + '_p.vtk')
out2 = {}
for ir in ['real.', 'imag.']:
pdata = nm.zeros((nmap.shape[0], 1), dtype=nm.float64)
for v, idxs in [('p1', slice(0, noff)), ('p2', slice(noff, nend))]:
pdata[idxs, :] = out[ir + v].data
out2[ir + 'p'] = | Struct(name='p', mode='vertex', data=pdata) | sfepy.base.base.Struct |
#!/usr/bin/env python
"""
First solve the stationary electric conduction problem. Then use its
results to solve the evolutionary heat conduction problem.
Run this example on a command line::
$ python <path_to_this_file>/thermal_electric.py
"""
from __future__ import absolute_import
import sys
sys.path.append( '.' )
import os
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
# Time stepping for the heat conduction problem.
t0 = 0.0
t1 = 0.5
n_step = 11
# Material parameters.
specific_heat = 1.2
##########
cwd = os.path.split(os.path.join(os.getcwd(), __file__))[0]
options = {
'absolute_mesh_path' : True,
'output_dir' : os.path.join(cwd, 'output')
}
regions = {
'Omega' : 'all',
'Omega1' : 'cells of group 1',
'Omega2' : 'cells of group 2',
'Omega2_Surface': ('r.Omega1 *v r.Omega2', 'facet'),
'Left' : ('vertices in (x < %f)' % -0.4999, 'facet'),
'Right' : ('vertices in (x > %f)' % 0.4999, 'facet'),
}
materials = {
'm' : ({
'thermal_conductivity' : 2.0,
'electric_conductivity' : 1.5,
},),
}
# The fields use the same approximation, so a single field could be used
# instead.
fields = {
'temperature': ('real', 1, 'Omega', 1),
'potential' : ('real', 1, 'Omega', 1),
}
variables = {
'T' : ('unknown field', 'temperature', 0, 1),
's' : ('test field', 'temperature', 'T'),
'phi' : ('unknown field', 'potential', 1),
'psi' : ('test field', 'potential', 'phi'),
'phi_known' : ('parameter field', 'potential', '(set-to-None)'),
}
ics = {
'ic' : ('Omega', {'T.0' : 0.0}),
}
ebcs = {
'left' : ('Left', {'T.0' : 0.0, 'phi.0' : 0.0}),
'right' : ('Right', {'T.0' : 2.0, 'phi.0' : 0.0}),
'inside' : ('Omega2_Surface', {'phi.0' : 'set_electric_bc'}),
}
def set_electric_bc(coor):
y = coor[:,1]
ymin, ymax = y.min(), y.max()
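    # Affine map of y from [ymin, ymax] onto [-1, 1]: y = ymin -> -1.0,
    # y = ymax -> +1.0.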
val = 2.0 * (((y - ymin) / (ymax - ymin)) - 0.5)
return val
functions = {
'set_electric_bc' : (lambda ts, coor, bc, problem, **kwargs:
set_electric_bc(coor),),
}
equations = {
'2' : """%.12e * dw_volume_dot.2.Omega( s, dT/dt )
+ dw_laplace.2.Omega( m.thermal_conductivity, s, T )
= dw_electric_source.2.Omega( m.electric_conductivity,
s, phi_known ) """ % specific_heat,
'1' : """dw_laplace.2.Omega( m.electric_conductivity, psi, phi ) = 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
'problem' : 'nonlinear',
}),
'ts' : ('ts.simple', {
't0' : t0,
't1' : t1,
'dt' : None,
'n_step' : n_step, # has precedence over dt!
}),
}
def main():
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
output.prefix = 'therel:'
required, other = | get_standard_keywords() | sfepy.base.conf.get_standard_keywords |
#!/usr/bin/env python
"""
First solve the stationary electric conduction problem. Then use its
results to solve the evolutionary heat conduction problem.
Run this example on a command line::
$ python <path_to_this_file>/thermal_electric.py
"""
from __future__ import absolute_import
import sys
sys.path.append( '.' )
import os
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
# Time stepping for the heat conduction problem.
t0 = 0.0
t1 = 0.5
n_step = 11
# Material parameters.
specific_heat = 1.2
##########
cwd = os.path.split(os.path.join(os.getcwd(), __file__))[0]
options = {
'absolute_mesh_path' : True,
'output_dir' : os.path.join(cwd, 'output')
}
regions = {
'Omega' : 'all',
'Omega1' : 'cells of group 1',
'Omega2' : 'cells of group 2',
'Omega2_Surface': ('r.Omega1 *v r.Omega2', 'facet'),
'Left' : ('vertices in (x < %f)' % -0.4999, 'facet'),
'Right' : ('vertices in (x > %f)' % 0.4999, 'facet'),
}
materials = {
'm' : ({
'thermal_conductivity' : 2.0,
'electric_conductivity' : 1.5,
},),
}
# The fields use the same approximation, so a single field could be used
# instead.
fields = {
'temperature': ('real', 1, 'Omega', 1),
'potential' : ('real', 1, 'Omega', 1),
}
variables = {
'T' : ('unknown field', 'temperature', 0, 1),
's' : ('test field', 'temperature', 'T'),
'phi' : ('unknown field', 'potential', 1),
'psi' : ('test field', 'potential', 'phi'),
'phi_known' : ('parameter field', 'potential', '(set-to-None)'),
}
ics = {
'ic' : ('Omega', {'T.0' : 0.0}),
}
ebcs = {
'left' : ('Left', {'T.0' : 0.0, 'phi.0' : 0.0}),
'right' : ('Right', {'T.0' : 2.0, 'phi.0' : 0.0}),
'inside' : ('Omega2_Surface', {'phi.0' : 'set_electric_bc'}),
}
def set_electric_bc(coor):
y = coor[:,1]
ymin, ymax = y.min(), y.max()
val = 2.0 * (((y - ymin) / (ymax - ymin)) - 0.5)
return val
functions = {
'set_electric_bc' : (lambda ts, coor, bc, problem, **kwargs:
set_electric_bc(coor),),
}
equations = {
'2' : """%.12e * dw_volume_dot.2.Omega( s, dT/dt )
+ dw_laplace.2.Omega( m.thermal_conductivity, s, T )
= dw_electric_source.2.Omega( m.electric_conductivity,
s, phi_known ) """ % specific_heat,
'1' : """dw_laplace.2.Omega( m.electric_conductivity, psi, phi ) = 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
'problem' : 'nonlinear',
}),
'ts' : ('ts.simple', {
't0' : t0,
't1' : t1,
'dt' : None,
'n_step' : n_step, # has precedence over dt!
}),
}
def main():
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
output.prefix = 'therel:'
required, other = get_standard_keywords()
conf = | ProblemConf.from_file(__file__, required, other) | sfepy.base.conf.ProblemConf.from_file |
#!/usr/bin/env python
"""
First solve the stationary electric conduction problem. Then use its
results to solve the evolutionary heat conduction problem.
Run this example on a command line::
$ python <path_to_this_file>/thermal_electric.py
"""
from __future__ import absolute_import
import sys
sys.path.append( '.' )
import os
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
# Time stepping for the heat conduction problem.
t0 = 0.0
t1 = 0.5
n_step = 11
# Material parameters.
specific_heat = 1.2
##########
cwd = os.path.split(os.path.join(os.getcwd(), __file__))[0]
options = {
'absolute_mesh_path' : True,
'output_dir' : os.path.join(cwd, 'output')
}
regions = {
'Omega' : 'all',
'Omega1' : 'cells of group 1',
'Omega2' : 'cells of group 2',
'Omega2_Surface': ('r.Omega1 *v r.Omega2', 'facet'),
'Left' : ('vertices in (x < %f)' % -0.4999, 'facet'),
'Right' : ('vertices in (x > %f)' % 0.4999, 'facet'),
}
materials = {
'm' : ({
'thermal_conductivity' : 2.0,
'electric_conductivity' : 1.5,
},),
}
# The fields use the same approximation, so a single field could be used
# instead.
fields = {
'temperature': ('real', 1, 'Omega', 1),
'potential' : ('real', 1, 'Omega', 1),
}
variables = {
'T' : ('unknown field', 'temperature', 0, 1),
's' : ('test field', 'temperature', 'T'),
'phi' : ('unknown field', 'potential', 1),
'psi' : ('test field', 'potential', 'phi'),
'phi_known' : ('parameter field', 'potential', '(set-to-None)'),
}
ics = {
'ic' : ('Omega', {'T.0' : 0.0}),
}
ebcs = {
'left' : ('Left', {'T.0' : 0.0, 'phi.0' : 0.0}),
'right' : ('Right', {'T.0' : 2.0, 'phi.0' : 0.0}),
'inside' : ('Omega2_Surface', {'phi.0' : 'set_electric_bc'}),
}
def set_electric_bc(coor):
y = coor[:,1]
ymin, ymax = y.min(), y.max()
val = 2.0 * (((y - ymin) / (ymax - ymin)) - 0.5)
return val
functions = {
'set_electric_bc' : (lambda ts, coor, bc, problem, **kwargs:
set_electric_bc(coor),),
}
equations = {
'2' : """%.12e * dw_volume_dot.2.Omega( s, dT/dt )
+ dw_laplace.2.Omega( m.thermal_conductivity, s, T )
= dw_electric_source.2.Omega( m.electric_conductivity,
s, phi_known ) """ % specific_heat,
'1' : """dw_laplace.2.Omega( m.electric_conductivity, psi, phi ) = 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
'problem' : 'nonlinear',
}),
'ts' : ('ts.simple', {
't0' : t0,
't1' : t1,
'dt' : None,
'n_step' : n_step, # has precedence over dt!
}),
}
def main():
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
output.prefix = 'therel:'
required, other = get_standard_keywords()
conf = ProblemConf.from_file(__file__, required, other)
problem = | Problem.from_conf(conf, init_equations=False) | sfepy.discrete.Problem.from_conf |
#!/usr/bin/env python
"""
Plot quadrature points for the given geometry and integration order.
"""
from __future__ import absolute_import, print_function
import sys
sys.path.append('.')
from argparse import ArgumentParser
import sfepy.postprocess.plot_quadrature as pq
helps = {
'geometry' :
'reference element geometry, one of "2_3", "2_4", "3_4", "3_8"'
' [default: %(default)s]',
'order' :
'quadrature order [default: %(default)s]',
'boundary' :
'plot boundary quadrature points',
'min_radius' :
'min. radius of points corresponding to the min. weight'
' [default: %(default)s]',
'max_radius' :
'max. radius of points corresponding to the max. weight'
' [default: %(default)s]',
'show_colorbar' :
'show colorbar for quadrature weights',
'show_labels' :
'label quadrature points',
'print_qp' :
'print quadrature points and weights',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-g', '--geometry', metavar='name',
action='store', dest='geometry',
default='2_4', help=helps['geometry'])
parser.add_argument('-n', '--order', metavar='order', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_argument('-b', '--boundary',
action='store_true', dest='boundary',
default=False, help=helps['boundary'])
parser.add_argument('-r', '--min-radius', metavar='float', type=float,
action='store', dest='min_radius',
default=10, help=helps['min_radius'])
parser.add_argument('-R', '--max-radius', metavar='float', type=float,
action='store', dest='max_radius',
default=50, help=helps['max_radius'])
parser.add_argument('-c', '--show-colorbar',
action='store_true', dest='show_colorbar',
default=False, help=helps['show_colorbar'])
    parser.add_argument('-l', '--show-labels',
action='store_true', dest='show_labels',
default=False, help=helps['show_labels'])
parser.add_argument('-p', '--print-qp',
action='store_true', dest='print_qp',
default=False, help=helps['print_qp'])
options = parser.parse_args()
aux = pq.plot_quadrature(None, options.geometry, options.order,
boundary=options.boundary,
min_radius=options.min_radius,
max_radius=options.max_radius,
show_colorbar=options.show_colorbar,
show_labels=options.show_labels)
if options.print_qp:
ax, coors, weights = aux
for ic, coor in enumerate(coors):
print(ic, coor, weights[ic])
| pq.plt.show() | sfepy.postprocess.plot_quadrature.plt.show |
#!/usr/bin/env python
"""
Plot quadrature points for the given geometry and integration order.
"""
from optparse import OptionParser
import sfepy.postprocess.plot_quadrature as pq
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
'geometry' :
'reference element geometry, one of "2_3", "2_4", "3_4", "3_8"'
' [default: %default]',
'order' :
'quadrature order [default: %default]',
'min_radius' :
'min. radius of points corresponding to the min. weight'
' [default: %default]',
'max_radius' :
'max. radius of points corresponding to the max. weight'
' [default: %default]',
'show_colorbar' :
'show colorbar for quadrature weights'
}
def main():
parser = OptionParser(usage=usage, version='%prog')
parser.add_option('-g', '--geometry', metavar='name',
action='store', dest='geometry',
default='2_4', help=helps['geometry'])
parser.add_option('-n', '--order', metavar='order', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_option('-r', '--min-radius', metavar='float', type=float,
action='store', dest='min_radius',
default=10, help=helps['min_radius'])
parser.add_option('-R', '--max-radius', metavar='float', type=float,
action='store', dest='max_radius',
default=50, help=helps['max_radius'])
parser.add_option('-c', '--show-colorbar',
action='store_true', dest='show_colorbar',
default=False, help=helps['show_colorbar'])
options, args = parser.parse_args()
if len(args) != 0:
        parser.print_help()
return
pq.plot_quadrature(None, options.geometry, options.order,
options.min_radius, options.max_radius,
options.show_colorbar)
| pq.plt.show() | sfepy.postprocess.plot_quadrature.plt.show |
"""
The Dirichlet, periodic and linear combination boundary condition
classes, as well as the initial condition class.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import basestr, Container, Struct
from sfepy.discrete.functions import Function
import six
def get_condition_value(val, functions, kind, name):
"""
Check a boundary/initial condition value type and return the value or
corresponding function.
"""
if type(val) == str:
if functions is not None:
try:
fun = functions[val]
except IndexError:
raise ValueError('unknown function %s given for %s %s!'
% (val, kind, name))
else:
raise ValueError('no functions given for %s %s!' % (kind, name))
elif (isinstance(val, Function) or nm.isscalar(val)
or isinstance(val, nm.ndarray)):
fun = val
else:
raise ValueError('unknown value type for %s %s!'
% (kind, name))
return fun
def _get_region(name, regions, bc_name):
try:
region = regions[name]
except IndexError:
msg = "no region '%s' used in condition %s!" % (name, bc_name)
raise IndexError(msg)
return region
class Conditions(Container):
"""
Container for various conditions.
"""
@staticmethod
def from_conf(conf, regions):
conds = []
for key, cc in six.iteritems(conf):
times = cc.get('times', None)
if 'ebc' in key:
region = _get_region(cc.region, regions, cc.name)
cond = EssentialBC(cc.name, region, cc.dofs, key=key,
times=times)
elif 'epbc' in key:
rs = [_get_region(ii, regions, cc.name) for ii in cc.region]
cond = PeriodicBC(cc.name, rs, cc.dofs, cc.match, key=key,
times=times)
elif 'lcbc' in key:
if isinstance(cc.region, basestr):
rs = [_get_region(cc.region, regions, cc.name), None]
else:
rs = [_get_region(ii, regions, cc.name)
for ii in cc.region]
cond = LinearCombinationBC(cc.name, rs, cc.dofs,
cc.dof_map_fun, cc.kind,
key=key,
times=times,
arguments=cc.get('arguments', None))
elif 'ic' in key:
region = _get_region(cc.region, regions, cc.name)
cond = InitialCondition(cc.name, region, cc.dofs, key=key)
else:
raise ValueError('unknown condition type! (%s)' % key)
conds.append(cond)
obj = Conditions(conds)
return obj
def group_by_variables(self, groups=None):
"""
        Group boundary conditions of each variable. Each condition in a
        group is a single condition.
Parameters
----------
groups : dict, optional
If present, update the `groups` dictionary.
Returns
-------
out : dict
The dictionary with variable names as keys and lists of
single condition instances as values.
"""
if groups is None:
out = {}
else:
out = groups
for cond in self:
for single_cond in cond.iter_single():
vname = single_cond.dofs[0].split('.')[0]
out.setdefault(vname, Conditions()).append(single_cond)
return out
def canonize_dof_names(self, dofs):
"""
Canonize the DOF names using the full list of DOFs of a
variable.
"""
for cond in self:
cond.canonize_dof_names(dofs)
def sort(self):
"""
Sort boundary conditions by their key.
"""
self._objs.sort(key=lambda a: a.key)
self.update()
def zero_dofs(self):
"""
Set all boundary condition values to zero, if applicable.
"""
for cond in self:
if isinstance(cond, EssentialBC):
cond.zero_dofs()
def _canonize(dofs, all_dofs):
"""
Helper function.
"""
vname, dd = dofs.split('.')
if dd == 'all':
cdofs = all_dofs
elif dd[0] == '[':
cdofs = [vname + '.' + ii.strip()
for ii in dd[1:-1].split(',')]
else:
cdofs = [dofs]
return cdofs
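# Worked examples of _canonize() (added for illustration):
#   _canonize('u.all', ['u.0', 'u.1'])    -> ['u.0', 'u.1']
#   _canonize('u.[0, 1]', ['u.0', 'u.1']) -> ['u.0', 'u.1']
#   _canonize('u.1', ['u.0', 'u.1'])      -> ['u.1']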
class Condition(Struct):
"""
Common boundary condition methods.
"""
def __init__(self, name, **kwargs):
| Struct.__init__(self, name=name, **kwargs) | sfepy.base.base.Struct.__init__ |
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
from sfepy.terms.terms_th import THTerm, ETHTerm
from sfepy.terms.terms_elastic import CauchyStressTerm
class BiotTerm(Term):
r"""
Biot coupling term with :math:`\alpha_{ij}` given in:
* vector form exploiting symmetry - in 3D it has the
indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in 2D it has
the indices ordered as :math:`[11, 22, 12]`,
* matrix form - non-symmetric coupling parameter.
Corresponds to weak forms of Biot gradient and divergence terms.
Can be evaluated. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} p\ \alpha_{ij} e_{ij}(\ul{v}) \mbox{ , } \int_{\Omega}
q\ \alpha_{ij} e_{ij}(\ul{u})
:Arguments 1:
- material : :math:`\alpha_{ij}`
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`\alpha_{ij}`
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`\alpha_{ij}`
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_biot'
arg_types = (('material', 'virtual', 'state'),
('material', 'state', 'virtual'),
('material', 'parameter_v', 'parameter_s'))
arg_shapes = [{'material' : 'S, 1',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1},
{'material' : 'D, D'}]
modes = ('grad', 'div', 'eval')
def get_fargs(self, mat, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
sym_mode = False if mat.shape[-2] == mat.shape[-1] > 1 else True
if not sym_mode:
sh = mat.shape
# the gradient given by 'self.get' is transposed
mat = nm.swapaxes(mat, 2, 3)
mat = mat.reshape(sh[:2] + (sh[2]**2, 1))
if self.mode == 'grad':
qp_var, qp_name = svar, 'val'
else:
if sym_mode:
qp_var, qp_name = vvar, 'cauchy_strain'
else:
qp_var, qp_name = vvar, 'grad'
if mode == 'weak':
vvg, _ = self.get_mapping(vvar)
svg, _ = self.get_mapping(svar)
if diff_var is None:
val_qp = self.get(qp_var, qp_name)
if qp_name == 'grad':
sh = val_qp.shape
val_qp = val_qp.reshape(sh[:2] + (sh[2]**2, 1))
fmode = 0
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return 1.0, val_qp, mat, svg, vvg, fmode
elif mode == 'eval':
vvg, _ = self.get_mapping(vvar)
if sym_mode:
strain = self.get(vvar, 'cauchy_strain')
else:
strain = self.get(vvar, 'grad')
sh = strain.shape
strain = strain.reshape(sh[:2] + (sh[2]**2, 1))
pval = self.get(svar, 'val')
return 1.0, pval, strain, mat, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types(self):
self.function = {
'grad' : terms.dw_biot_grad,
'div' : terms.dw_biot_div,
'eval' : terms.d_biot_div,
}[self.mode]
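# Illustrative usage (an assumption, not part of this module): in a problem
# description file the 'grad' and 'div' weak forms of the term typically enter
# a Biot-type coupled system as, e.g.,
#
#     'eq_u': """dw_lin_elastic.i.Omega(m.D, v, u)
#              - dw_biot.i.Omega(m.alpha, v, p) = 0""",
#     'eq_p': """dw_biot.i.Omega(m.alpha, u, q)
#              + dw_diffusion.i.Omega(m.K, q, p) = 0""",
#
# where 'm.alpha' supplies the coupling coefficients in the 'S, 1' shape.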
class BiotStressTerm(CauchyStressTerm):
r"""
Evaluate Biot stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
- \int_{\Omega} \alpha_{ij} \bar{p}
.. math::
\mbox{vector for } K \from \Ical_h:
- \int_{T_K} \alpha_{ij} \bar{p} / \int_{T_K} 1
.. math::
- \alpha_{ij} \bar{p}|_{qp}
:Arguments:
- material : :math:`\alpha_{ij}`
- parameter : :math:`\bar{p}`
"""
name = 'ev_biot_stress'
arg_types = ('material', 'parameter')
arg_shapes = {'material' : 'S, 1', 'parameter' : 1}
@staticmethod
def function(out, val_qp, mat, vg, fmode):
if fmode == 2:
out[:] = | dot_sequences(mat, val_qp) | sfepy.linalg.dot_sequences |
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
from sfepy.terms.terms_th import THTerm, ETHTerm
from sfepy.terms.terms_elastic import CauchyStressTerm
class BiotTerm(Term):
r"""
Biot coupling term with :math:`\alpha_{ij}` given in:
* vector form exploiting symmetry - in 3D it has the
indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in 2D it has
the indices ordered as :math:`[11, 22, 12]`,
* matrix form - non-symmetric coupling parameter.
Corresponds to weak forms of Biot gradient and divergence terms.
Can be evaluated. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} p\ \alpha_{ij} e_{ij}(\ul{v}) \mbox{ , } \int_{\Omega}
q\ \alpha_{ij} e_{ij}(\ul{u})
:Arguments 1:
- material : :math:`\alpha_{ij}`
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`\alpha_{ij}`
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`\alpha_{ij}`
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_biot'
arg_types = (('material', 'virtual', 'state'),
('material', 'state', 'virtual'),
('material', 'parameter_v', 'parameter_s'))
arg_shapes = [{'material' : 'S, 1',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1},
{'material' : 'D, D'}]
modes = ('grad', 'div', 'eval')
def get_fargs(self, mat, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
sym_mode = False if mat.shape[-2] == mat.shape[-1] > 1 else True
if not sym_mode:
sh = mat.shape
# the gradient given by 'self.get' is transposed
mat = nm.swapaxes(mat, 2, 3)
mat = mat.reshape(sh[:2] + (sh[2]**2, 1))
if self.mode == 'grad':
qp_var, qp_name = svar, 'val'
else:
if sym_mode:
qp_var, qp_name = vvar, 'cauchy_strain'
else:
qp_var, qp_name = vvar, 'grad'
if mode == 'weak':
vvg, _ = self.get_mapping(vvar)
svg, _ = self.get_mapping(svar)
if diff_var is None:
val_qp = self.get(qp_var, qp_name)
if qp_name == 'grad':
sh = val_qp.shape
val_qp = val_qp.reshape(sh[:2] + (sh[2]**2, 1))
fmode = 0
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return 1.0, val_qp, mat, svg, vvg, fmode
elif mode == 'eval':
vvg, _ = self.get_mapping(vvar)
if sym_mode:
strain = self.get(vvar, 'cauchy_strain')
else:
strain = self.get(vvar, 'grad')
sh = strain.shape
strain = strain.reshape(sh[:2] + (sh[2]**2, 1))
pval = self.get(svar, 'val')
return 1.0, pval, strain, mat, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types(self):
self.function = {
'grad' : terms.dw_biot_grad,
'div' : terms.dw_biot_div,
'eval' : terms.d_biot_div,
}[self.mode]
class BiotStressTerm(CauchyStressTerm):
r"""
Evaluate Biot stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
- \int_{\Omega} \alpha_{ij} \bar{p}
.. math::
\mbox{vector for } K \from \Ical_h:
- \int_{T_K} \alpha_{ij} \bar{p} / \int_{T_K} 1
.. math::
- \alpha_{ij} \bar{p}|_{qp}
:Arguments:
- material : :math:`\alpha_{ij}`
- parameter : :math:`\bar{p}`
"""
name = 'ev_biot_stress'
arg_types = ('material', 'parameter')
arg_shapes = {'material' : 'S, 1', 'parameter' : 1}
@staticmethod
def function(out, val_qp, mat, vg, fmode):
if fmode == 2:
out[:] = dot_sequences(mat, val_qp)
status = 0
else:
status = | terms.de_cauchy_stress(out, val_qp, mat, vg, fmode) | sfepy.terms.terms.terms.de_cauchy_stress |
r"""
Diametrically point loaded 2-D disk. See :ref:`sec-primer`.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import absolute_import
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.discrete.fem.utils import refine_mesh
from sfepy import data_dir
# Fix the mesh file name if you run this file outside the SfePy directory.
filename_mesh = data_dir + '/meshes/2d/its2D.mesh'
refinement_level = 0
filename_mesh = | refine_mesh(filename_mesh, refinement_level) | sfepy.discrete.fem.utils.refine_mesh |
r"""
Diametrically point loaded 2-D disk. See :ref:`sec-primer`.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import absolute_import
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.discrete.fem.utils import refine_mesh
from sfepy import data_dir
# Fix the mesh file name if you run this file outside the SfePy directory.
filename_mesh = data_dir + '/meshes/2d/its2D.mesh'
refinement_level = 0
filename_mesh = refine_mesh(filename_mesh, refinement_level)
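# Illustrative aside (added; lame_from_youngpoisson() is assumed to be available
# in sfepy.mechanics.matcoefs): the Lame parameters in the definition above
# follow from the Young's modulus and Poisson's ratio used just below.
from sfepy.mechanics.matcoefs import lame_from_youngpoisson

lam, mu = lame_from_youngpoisson(2000.0, 0.4)
# lam = E*nu / ((1 + nu)*(1 - 2*nu)) ~ 2857.1, mu = E / (2*(1 + nu)) ~ 714.3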
output_dir = '.' # set this to a valid directory you have write access to
young = 2000.0 # Young's modulus [MPa]
poisson = 0.4 # Poisson's ratio
options = {
'output_dir' : output_dir,
}
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < 0.001)', 'facet'),
'Bottom' : ('vertices in (y < 0.001)', 'facet'),
'Top' : ('vertex 2', 'vertex'),
}
materials = {
'Asphalt' : ({'D': | stiffness_from_youngpoisson(2, young, poisson) | sfepy.mechanics.matcoefs.stiffness_from_youngpoisson |
"""
Friction-slip model formulated as the implicit complementarity problem.
To integrate over a (dual) mesh, one needs:
* coordinates of element vertices
* element connectivity
* local base for each element
* constant in each sub-triangle of the dual mesh
Data for each dual element:
* connectivity of its sub-triangles
* base directions t_1, t_2
Normal stresses:
* Assemble the residual and apply the LCBC operator described below.
Solution in \hat{V}_h^c:
* construct a restriction operator via LCBC just like in the no-penetration case
* use the substitution:
u_1 = n_1 * w
u_2 = n_2 * w
u_3 = n_3 * w
The new DOF is `w`.
* for the record, no-penetration does:
w_1 = - (1 / n_1) * (u_2 * n_2 + u_3 * n_3)
w_2 = u_2
w_3 = u_3
"""
from sfepy.base.base import *
from sfepy.base.compat import unique
import sfepy.linalg as la
from sfepy.fem import Mesh, Domain, Field, Variables
from sfepy.fem.mappings import VolumeMapping, SurfaceMapping
from sfepy.fem.fe_surface import FESurface
from sfepy.fem.utils import compute_nodal_normals
def edge_data_to_output(coors, conn, e_sort, data):
out = nm.zeros_like(coors)
out[conn[e_sort,0]] = data
return Struct(name='output_data',
mode='vertex', data=out,
dofs=None)
class DualMesh(Struct):
"""Dual mesh corresponding to a (surface) region."""
def __init__(self, region):
"""
Assume a single GeometryElement type in all groups, linear
approximation.
Works for one group only for the moment.
"""
domain = region.domain
self.dim = domain.shape.dim
self.region = copy(region)
self.region.setup_face_indices()
self.mesh_coors = domain.mesh.coors
# add_to_regions=True due to Field implementation shortcomings.
omega = domain.create_region('Omega', 'all', add_to_regions=True)
self.field = | Field('displacements', nm.float64, (3,), omega, 1) | sfepy.fem.Field |
"""
Friction-slip model formulated as the implicit complementarity problem.
To integrate over a (dual) mesh, one needs:
* coordinates of element vertices
* element connectivity
* local base for each element
* constant in each sub-triangle of the dual mesh
Data for each dual element:
* connectivity of its sub-triangles
* base directions t_1, t_2
Normal stresses:
* Assemble the residual and apply the LCBC operator described below.
Solution in \hat{V}_h^c:
* construct a restriction operator via LCBC just like in the no-penetration case
* use the substitution:
u_1 = n_1 * w
u_2 = n_2 * w
u_3 = n_3 * w
The new DOF is `w`.
* for the record, no-penetration does:
w_1 = - (1 / n_1) * (u_2 * n_2 + u_3 * n_3)
w_2 = u_2
w_3 = u_3
"""
from sfepy.base.base import *
from sfepy.base.compat import unique
import sfepy.linalg as la
from sfepy.fem import Mesh, Domain, Field, Variables
from sfepy.fem.mappings import VolumeMapping, SurfaceMapping
from sfepy.fem.fe_surface import FESurface
from sfepy.fem.utils import compute_nodal_normals
def edge_data_to_output(coors, conn, e_sort, data):
out = nm.zeros_like(coors)
out[conn[e_sort,0]] = data
return Struct(name='output_data',
mode='vertex', data=out,
dofs=None)
class DualMesh(Struct):
"""Dual mesh corresponding to a (surface) region."""
def __init__(self, region):
"""
Assume a single GeometryElement type in all groups, linear
approximation.
Works for one group only for the moment.
"""
domain = region.domain
self.dim = domain.shape.dim
self.region = copy(region)
self.region.setup_face_indices()
self.mesh_coors = domain.mesh.coors
# add_to_regions=True due to Field implementation shortcomings.
omega = domain.create_region('Omega', 'all', add_to_regions=True)
self.field = Field('displacements', nm.float64, (3,), omega, 1)
self.gel = domain.geom_els.values()[0]
self.sgel = self.gel.surface_facet
face_key = 's%d' % self.sgel.n_vertex
# Coordinate interpolation to face centres.
self.ps = self.gel.interp.poly_spaces[face_key]
centre = self.ps.node_coors.sum(axis=0) / self.ps.n_nod
self.bf = self.ps.eval_base(centre[None,:])
self.surfaces = surfaces = {}
self.dual_surfaces = dual_surfaces = {}
for ig, conn in enumerate(domain.mesh.conns):
surface = FESurface(None, self.region, self.gel.faces, conn, ig)
surfaces[ig] = surface
dual_surface = self.describe_dual_surface(surface)
dual_surfaces[ig] = dual_surface
def describe_dual_surface(self, surface):
n_fa, n_edge = surface.n_fa, self.sgel.n_edge
mesh_coors = self.mesh_coors
# Face centres.
fcoors = mesh_coors[surface.econn]
centre_coors = nm.dot(self.bf.squeeze(), fcoors)
surface_coors = mesh_coors[surface.nodes]
dual_coors = nm.r_[surface_coors, centre_coors]
coor_offset = surface.nodes.shape[0]
# Normals in primary mesh nodes.
nodal_normals = compute_nodal_normals(surface.nodes, self.region,
self.field)
ee = surface.leconn[:,self.sgel.edges].copy()
edges_per_face = ee.copy()
sh = edges_per_face.shape
ee.shape = edges_per_face.shape = (sh[0] * sh[1], sh[2])
edges_per_face.sort(axis=1)
eo = nm.empty((sh[0] * sh[1],), dtype=nm.object)
eo[:] = [tuple(ii) for ii in edges_per_face]
ueo, e_sort, e_id = | unique(eo, return_index=True, return_inverse=True) | sfepy.base.compat.unique |
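        # ueo holds each edge (as a sorted vertex tuple) once, e_sort gives the
        # index of its first occurrence among the per-face edges, and e_id maps
        # every per-face edge to its unique edge, so an edge shared by two
        # neighbouring faces gets the same identifier.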