diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c3a7ccc4b33f27dbae7958641a89106cf9580326 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__init__.py @@ -0,0 +1,27 @@ +""" +============================================================== +Finite Difference Differentiation (:mod:`scipy.differentiate`) +============================================================== + +.. currentmodule:: scipy.differentiate + +SciPy ``differentiate`` provides functions for performing finite difference +numerical differentiation of black-box functions. + +.. autosummary:: + :toctree: generated/ + + derivative + jacobian + hessian + +""" + + +from ._differentiate import * + +__all__ = ['derivative', 'jacobian', 'hessian'] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..881c26bbcce394b260543f151e39d9952f4dc5ad Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/_differentiate.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/_differentiate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..702e141bf4ba2ebc7a340828c906523bfb995f90 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/_differentiate.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/_differentiate.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/_differentiate.py new file mode 100644 index 0000000000000000000000000000000000000000..0e104a071055161b69f62cec317e8a07b4466653 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/_differentiate.py @@ -0,0 +1,1129 @@ +# mypy: disable-error-code="attr-defined" +import warnings +import numpy as np +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult +from scipy._lib._array_api import array_namespace, xp_sign, xp_copy, xp_take_along_axis + +_EERRORINCREASE = -1 # used in derivative + +def _derivative_iv(f, x, args, tolerances, maxiter, order, initial_step, + step_factor, step_direction, preserve_shape, callback): + # Input validation for `derivative` + xp = array_namespace(x) + + if not callable(f): + raise ValueError('`f` must be callable.') + + if not np.iterable(args): + args = (args,) + + tolerances = {} if tolerances is None else tolerances + atol = tolerances.get('atol', None) + rtol = tolerances.get('rtol', None) + + # tolerances are floats, not arrays; OK to use NumPy + message = 'Tolerances and step parameters must be non-negative scalars.' 
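+    # A value of 1 temporarily stands in for an unset `atol`/`rtol` so that the
+    # numeric-dtype, non-negativity, NaN, and shape checks below can be applied
+    # to all three quantities at once; the actual defaults are resolved later
+    # from the working dtype.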
+ tols = np.asarray([atol if atol is not None else 1, + rtol if rtol is not None else 1, + step_factor]) + if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0) + or np.any(np.isnan(tols)) or tols.shape != (3,)): + raise ValueError(message) + step_factor = float(tols[2]) + + maxiter_int = int(maxiter) + if maxiter != maxiter_int or maxiter <= 0: + raise ValueError('`maxiter` must be a positive integer.') + + order_int = int(order) + if order_int != order or order <= 0: + raise ValueError('`order` must be a positive integer.') + + step_direction = xp.asarray(step_direction) + initial_step = xp.asarray(initial_step) + temp = xp.broadcast_arrays(x, step_direction, initial_step) + x, step_direction, initial_step = temp + + message = '`preserve_shape` must be True or False.' + if preserve_shape not in {True, False}: + raise ValueError(message) + + if callback is not None and not callable(callback): + raise ValueError('`callback` must be callable.') + + return (f, x, args, atol, rtol, maxiter_int, order_int, initial_step, + step_factor, step_direction, preserve_shape, callback) + + +def derivative(f, x, *, args=(), tolerances=None, maxiter=10, + order=8, initial_step=0.5, step_factor=2.0, + step_direction=0, preserve_shape=False, callback=None): + """Evaluate the derivative of a elementwise, real scalar function numerically. + + For each element of the output of `f`, `derivative` approximates the first + derivative of `f` at the corresponding element of `x` using finite difference + differentiation. + + This function works elementwise when `x`, `step_direction`, and `args` contain + (broadcastable) arrays. + + Parameters + ---------- + f : callable + The function whose derivative is desired. The signature must be:: + + f(xi: ndarray, *argsi) -> ndarray + + where each element of ``xi`` is a finite real number and ``argsi`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable with + ``xi``. `f` must be an elementwise function: each scalar element ``f(xi)[j]`` + must equal ``f(xi[j])`` for valid indices ``j``. It must not mutate the array + ``xi`` or the arrays in ``argsi``. + x : float array_like + Abscissae at which to evaluate the derivative. Must be broadcastable with + `args` and `step_direction`. + args : tuple of array_like, optional + Additional positional array arguments to be passed to `f`. Arrays + must be broadcastable with one another and the arrays of `init`. + If the callable for which the root is desired requires arguments that are + not broadcastable with `x`, wrap that callable with `f` such that `f` + accepts only `x` and broadcastable ``*args``. + tolerances : dictionary of floats, optional + Absolute and relative tolerances. Valid keys of the dictionary are: + + - ``atol`` - absolute tolerance on the derivative + - ``rtol`` - relative tolerance on the derivative + + Iteration will stop when ``res.error < atol + rtol * abs(res.df)``. The default + `atol` is the smallest normal number of the appropriate dtype, and + the default `rtol` is the square root of the precision of the + appropriate dtype. + order : int, default: 8 + The (positive integer) order of the finite difference formula to be + used. Odd integers will be rounded up to the next even integer. + initial_step : float array_like, default: 0.5 + The (absolute) initial step size for the finite difference derivative + approximation. + step_factor : float, default: 2.0 + The factor by which the step size is *reduced* in each iteration; i.e. 
+ the step size in iteration 1 is ``initial_step/step_factor``. If + ``step_factor < 1``, subsequent steps will be greater than the initial + step; this may be useful if steps smaller than some threshold are + undesirable (e.g. due to subtractive cancellation error). + maxiter : int, default: 10 + The maximum number of iterations of the algorithm to perform. See + Notes. + step_direction : integer array_like + An array representing the direction of the finite difference steps (for + use when `x` lies near to the boundary of the domain of the function.) + Must be broadcastable with `x` and all `args`. + Where 0 (default), central differences are used; where negative (e.g. + -1), steps are non-positive; and where positive (e.g. 1), all steps are + non-negative. + preserve_shape : bool, default: False + In the following, "arguments of `f`" refers to the array ``xi`` and + any arrays within ``argsi``. Let ``shape`` be the broadcasted shape + of `x` and all elements of `args` (which is conceptually + distinct from ``xi` and ``argsi`` passed into `f`). + + - When ``preserve_shape=False`` (default), `f` must accept arguments + of *any* broadcastable shapes. + + - When ``preserve_shape=True``, `f` must accept arguments of shape + ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of + abscissae at which the function is being evaluated. + + In either case, for each scalar element ``xi[j]`` within ``xi``, the array + returned by `f` must include the scalar ``f(xi[j])`` at the same index. + Consequently, the shape of the output is always the shape of the input + ``xi``. + + See Examples. + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. + Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `derivative` (but containing the current + iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `derivative` will return a result. `callback` must not mutate + `res` or its attributes. + + Returns + ------- + res : _RichResult + An object similar to an instance of `scipy.optimize.OptimizeResult` with the + following attributes. The descriptions are written as though the values will + be scalars; however, if `f` returns an array, the outputs will be + arrays of the same shape. + + success : bool array + ``True`` where the algorithm terminated successfully (status ``0``); + ``False`` otherwise. + status : int array + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm converged to the specified tolerances. + - ``-1`` : The error estimate increased, so iteration was terminated. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + - ``-4`` : Iteration was terminated by `callback`. + - ``1`` : The algorithm is proceeding normally (in `callback` only). + + df : float array + The derivative of `f` at `x`, if the algorithm terminated + successfully. + error : float array + An estimate of the error: the magnitude of the difference between + the current estimate of the derivative and the estimate in the + previous iteration. + nit : int array + The number of iterations of the algorithm that were performed. + nfev : int array + The number of points at which `f` was evaluated. + x : float array + The value at which the derivative of `f` was evaluated + (after broadcasting with `args` and `step_direction`). 
+ + See Also + -------- + jacobian, hessian + + Notes + ----- + The implementation was inspired by jacobi [1]_, numdifftools [2]_, and + DERIVEST [3]_, but the implementation follows the theory of Taylor series + more straightforwardly (and arguably naively so). + In the first iteration, the derivative is estimated using a finite + difference formula of order `order` with maximum step size `initial_step`. + Each subsequent iteration, the maximum step size is reduced by + `step_factor`, and the derivative is estimated again until a termination + condition is reached. The error estimate is the magnitude of the difference + between the current derivative approximation and that of the previous + iteration. + + The stencils of the finite difference formulae are designed such that + abscissae are "nested": after `f` is evaluated at ``order + 1`` + points in the first iteration, `f` is evaluated at only two new points + in each subsequent iteration; ``order - 1`` previously evaluated function + values required by the finite difference formula are reused, and two + function values (evaluations at the points furthest from `x`) are unused. + + Step sizes are absolute. When the step size is small relative to the + magnitude of `x`, precision is lost; for example, if `x` is ``1e20``, the + default initial step size of ``0.5`` cannot be resolved. Accordingly, + consider using larger initial step sizes for large magnitudes of `x`. + + The default tolerances are challenging to satisfy at points where the + true derivative is exactly zero. If the derivative may be exactly zero, + consider specifying an absolute tolerance (e.g. ``atol=1e-12``) to + improve convergence. + + References + ---------- + .. [1] Hans Dembinski (@HDembinski). jacobi. + https://github.com/HDembinski/jacobi + .. [2] Per A. Brodtkorb and John D'Errico. numdifftools. + https://numdifftools.readthedocs.io/en/latest/ + .. [3] John D'Errico. DERIVEST: Adaptive Robust Numerical Differentiation. + https://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation + .. [4] Numerical Differentition. Wikipedia. + https://en.wikipedia.org/wiki/Numerical_differentiation + + Examples + -------- + Evaluate the derivative of ``np.exp`` at several points ``x``. + + >>> import numpy as np + >>> from scipy.differentiate import derivative + >>> f = np.exp + >>> df = np.exp # true derivative + >>> x = np.linspace(1, 2, 5) + >>> res = derivative(f, x) + >>> res.df # approximation of the derivative + array([2.71828183, 3.49034296, 4.48168907, 5.75460268, 7.3890561 ]) + >>> res.error # estimate of the error + array([7.13740178e-12, 9.16600129e-12, 1.17594823e-11, 1.51061386e-11, + 1.94262384e-11]) + >>> abs(res.df - df(x)) # true error + array([2.53130850e-14, 3.55271368e-14, 5.77315973e-14, 5.59552404e-14, + 6.92779167e-14]) + + Show the convergence of the approximation as the step size is reduced. + Each iteration, the step size is reduced by `step_factor`, so for + sufficiently small initial step, each iteration reduces the error by a + factor of ``1/step_factor**order`` until finite precision arithmetic + inhibits further improvement. + + >>> import matplotlib.pyplot as plt + >>> iter = list(range(1, 12)) # maximum iterations + >>> hfac = 2 # step size reduction per iteration + >>> hdir = [-1, 0, 1] # compare left-, central-, and right- steps + >>> order = 4 # order of differentiation formula + >>> x = 1 + >>> ref = df(x) + >>> errors = [] # true error + >>> for i in iter: + ... 
res = derivative(f, x, maxiter=i, step_factor=hfac, + ... step_direction=hdir, order=order, + ... # prevent early termination + ... tolerances=dict(atol=0, rtol=0)) + ... errors.append(abs(res.df - ref)) + >>> errors = np.array(errors) + >>> plt.semilogy(iter, errors[:, 0], label='left differences') + >>> plt.semilogy(iter, errors[:, 1], label='central differences') + >>> plt.semilogy(iter, errors[:, 2], label='right differences') + >>> plt.xlabel('iteration') + >>> plt.ylabel('error') + >>> plt.legend() + >>> plt.show() + >>> (errors[1, 1] / errors[0, 1], 1 / hfac**order) + (0.06215223140159822, 0.0625) + + The implementation is vectorized over `x`, `step_direction`, and `args`. + The function is evaluated once before the first iteration to perform input + validation and standardization, and once per iteration thereafter. + + >>> def f(x, p): + ... f.nit += 1 + ... return x**p + >>> f.nit = 0 + >>> def df(x, p): + ... return p*x**(p-1) + >>> x = np.arange(1, 5) + >>> p = np.arange(1, 6).reshape((-1, 1)) + >>> hdir = np.arange(-1, 2).reshape((-1, 1, 1)) + >>> res = derivative(f, x, args=(p,), step_direction=hdir, maxiter=1) + >>> np.allclose(res.df, df(x, p)) + True + >>> res.df.shape + (3, 5, 4) + >>> f.nit + 2 + + By default, `preserve_shape` is False, and therefore the callable + `f` may be called with arrays of any broadcastable shapes. + For example: + + >>> shapes = [] + >>> def f(x, c): + ... shape = np.broadcast_shapes(x.shape, c.shape) + ... shapes.append(shape) + ... return np.sin(c*x) + >>> + >>> c = [1, 5, 10, 20] + >>> res = derivative(f, 0, args=(c,)) + >>> shapes + [(4,), (4, 8), (4, 2), (3, 2), (2, 2), (1, 2)] + + To understand where these shapes are coming from - and to better + understand how `derivative` computes accurate results - note that + higher values of ``c`` correspond with higher frequency sinusoids. + The higher frequency sinusoids make the function's derivative change + faster, so more function evaluations are required to achieve the target + accuracy: + + >>> res.nfev + array([11, 13, 15, 17], dtype=int32) + + The initial ``shape``, ``(4,)``, corresponds with evaluating the + function at a single abscissa and all four frequencies; this is used + for input validation and to determine the size and dtype of the arrays + that store results. The next shape corresponds with evaluating the + function at an initial grid of abscissae and all four frequencies. + Successive calls to the function evaluate the function at two more + abscissae, increasing the effective order of the approximation by two. + However, in later function evaluations, the function is evaluated at + fewer frequencies because the corresponding derivative has already + converged to the required tolerance. This saves function evaluations to + improve performance, but it requires the function to accept arguments of + any shape. + + "Vector-valued" functions are unlikely to satisfy this requirement. + For example, consider + + >>> def f(x): + ... return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2] + + This integrand is not compatible with `derivative` as written; for instance, + the shape of the output will not be the same as the shape of ``x``. Such a + function *could* be converted to a compatible form with the introduction of + additional parameters, but this would be inconvenient. In such cases, + a simpler solution would be to use `preserve_shape`. + + >>> shapes = [] + >>> def f(x): + ... shapes.append(x.shape) + ... x0, x1, x2, x3 = x + ... 
return [x0, np.sin(3*x1), x2+np.sin(10*x2), np.sin(20*x3)*(x3-1)**2] + >>> + >>> x = np.zeros(4) + >>> res = derivative(f, x, preserve_shape=True) + >>> shapes + [(4,), (4, 8), (4, 2), (4, 2), (4, 2), (4, 2)] + + Here, the shape of ``x`` is ``(4,)``. With ``preserve_shape=True``, the + function may be called with argument ``x`` of shape ``(4,)`` or ``(4, n)``, + and this is what we observe. + + """ + # TODO (followup): + # - investigate behavior at saddle points + # - multivariate functions? + # - relative steps? + # - show example of `np.vectorize` + + res = _derivative_iv(f, x, args, tolerances, maxiter, order, initial_step, + step_factor, step_direction, preserve_shape, callback) + (func, x, args, atol, rtol, maxiter, order, + h0, fac, hdir, preserve_shape, callback) = res + + # Initialization + # Since f(x) (no step) is not needed for central differences, it may be + # possible to eliminate this function evaluation. However, it's useful for + # input validation and standardization, and everything else is designed to + # reduce function calls, so let's keep it simple. + temp = eim._initialize(func, (x,), args, preserve_shape=preserve_shape) + func, xs, fs, args, shape, dtype, xp = temp + + finfo = xp.finfo(dtype) + atol = finfo.smallest_normal if atol is None else atol + rtol = finfo.eps**0.5 if rtol is None else rtol # keep same as `hessian` + + x, f = xs[0], fs[0] + df = xp.full_like(f, xp.nan) + + # Ideally we'd broadcast the shape of `hdir` in `_elementwise_algo_init`, but + # it's simpler to do it here than to generalize `_elementwise_algo_init` further. + # `hdir` and `x` are already broadcasted in `_derivative_iv`, so we know + # that `hdir` can be broadcasted to the final shape. Same with `h0`. + hdir = xp.broadcast_to(hdir, shape) + hdir = xp.reshape(hdir, (-1,)) + hdir = xp.astype(xp_sign(hdir), dtype) + h0 = xp.broadcast_to(h0, shape) + h0 = xp.reshape(h0, (-1,)) + h0 = xp.astype(h0, dtype) + h0[h0 <= 0] = xp.asarray(xp.nan, dtype=dtype) + + status = xp.full_like(x, eim._EINPROGRESS, dtype=xp.int32) # in progress + nit, nfev = 0, 1 # one function evaluations performed above + # Boolean indices of left, central, right, and (all) one-sided steps + il = hdir < 0 + ic = hdir == 0 + ir = hdir > 0 + io = il | ir + + # Most of these attributes are reasonably obvious, but: + # - `fs` holds all the function values of all active `x`. The zeroth + # axis corresponds with active points `x`, the first axis corresponds + # with the different steps (in the order described in + # `_derivative_weights`). + # - `terms` (which could probably use a better name) is half the `order`, + # which is always even. + work = _RichResult(x=x, df=df, fs=f[:, xp.newaxis], error=xp.nan, h=h0, + df_last=xp.nan, error_last=xp.nan, fac=fac, + atol=atol, rtol=rtol, nit=nit, nfev=nfev, + status=status, dtype=dtype, terms=(order+1)//2, + hdir=hdir, il=il, ic=ic, ir=ir, io=io, + # Store the weights in an object so they can't get compressed + # Using RichResult to allow dot notation, but a dict would work + diff_state=_RichResult(central=[], right=[], fac=None)) + + # This is the correspondence between terms in the `work` object and the + # final result. In this case, the mapping is trivial. Note that `success` + # is prepended automatically. + res_work_pairs = [('status', 'status'), ('df', 'df'), ('error', 'error'), + ('nit', 'nit'), ('nfev', 'nfev'), ('x', 'x')] + + def pre_func_eval(work): + """Determine the abscissae at which the function needs to be evaluated. 
+ + See `_derivative_weights` for a description of the stencil (pattern + of the abscissae). + + In the first iteration, there is only one stored function value in + `work.fs`, `f(x)`, so we need to evaluate at `order` new points. In + subsequent iterations, we evaluate at two new points. Note that + `work.x` is always flattened into a 1D array after broadcasting with + all `args`, so we add a new axis at the end and evaluate all point + in one call to the function. + + For improvement: + - Consider measuring the step size actually taken, since ``(x + h) - x`` + is not identically equal to `h` with floating point arithmetic. + - Adjust the step size automatically if `x` is too big to resolve the + step. + - We could probably save some work if there are no central difference + steps or no one-sided steps. + """ + n = work.terms # half the order + h = work.h[:, xp.newaxis] # step size + c = work.fac # step reduction factor + d = c**0.5 # square root of step reduction factor (one-sided stencil) + # Note - no need to be careful about dtypes until we allocate `x_eval` + + if work.nit == 0: + hc = h / c**xp.arange(n, dtype=work.dtype) + hc = xp.concat((-xp.flip(hc, axis=-1), hc), axis=-1) + else: + hc = xp.concat((-h, h), axis=-1) / c**(n-1) + + if work.nit == 0: + hr = h / d**xp.arange(2*n, dtype=work.dtype) + else: + hr = xp.concat((h, h/d), axis=-1) / c**(n-1) + + n_new = 2*n if work.nit == 0 else 2 # number of new abscissae + x_eval = xp.zeros((work.hdir.shape[0], n_new), dtype=work.dtype) + il, ic, ir = work.il, work.ic, work.ir + x_eval[ir] = work.x[ir][:, xp.newaxis] + hr[ir] + x_eval[ic] = work.x[ic][:, xp.newaxis] + hc[ic] + x_eval[il] = work.x[il][:, xp.newaxis] - hr[il] + return x_eval + + def post_func_eval(x, f, work): + """ Estimate the derivative and error from the function evaluations + + As in `pre_func_eval`: in the first iteration, there is only one stored + function value in `work.fs`, `f(x)`, so we need to add the `order` new + points. In subsequent iterations, we add two new points. The tricky + part is getting the order to match that of the weights, which is + described in `_derivative_weights`. + + For improvement: + - Change the order of the weights (and steps in `pre_func_eval`) to + simplify `work_fc` concatenation and eliminate `fc` concatenation. + - It would be simple to do one-step Richardson extrapolation with `df` + and `df_last` to increase the order of the estimate and/or improve + the error estimate. + - Process the function evaluations in a more numerically favorable + way. For instance, combining the pairs of central difference evals + into a second-order approximation and using Richardson extrapolation + to produce a higher order approximation seemed to retain accuracy up + to very high order. + - Alternatively, we could use `polyfit` like Jacobi. An advantage of + fitting polynomial to more points than necessary is improved noise + tolerance. 
+ """ + n = work.terms + n_new = n if work.nit == 0 else 1 + il, ic, io = work.il, work.ic, work.io + + # Central difference + # `work_fc` is *all* the points at which the function has been evaluated + # `fc` is the points we're using *this iteration* to produce the estimate + work_fc = (f[ic][:, :n_new], work.fs[ic], f[ic][:, -n_new:]) + work_fc = xp.concat(work_fc, axis=-1) + if work.nit == 0: + fc = work_fc + else: + fc = (work_fc[:, :n], work_fc[:, n:n+1], work_fc[:, -n:]) + fc = xp.concat(fc, axis=-1) + + # One-sided difference + work_fo = xp.concat((work.fs[io], f[io]), axis=-1) + if work.nit == 0: + fo = work_fo + else: + fo = xp.concat((work_fo[:, 0:1], work_fo[:, -2*n:]), axis=-1) + + work.fs = xp.zeros((ic.shape[0], work.fs.shape[-1] + 2*n_new), dtype=work.dtype) + work.fs[ic] = work_fc + work.fs[io] = work_fo + + wc, wo = _derivative_weights(work, n, xp) + work.df_last = xp.asarray(work.df, copy=True) + work.df[ic] = fc @ wc / work.h[ic] + work.df[io] = fo @ wo / work.h[io] + work.df[il] *= -1 + + work.h /= work.fac + work.error_last = work.error + # Simple error estimate - the difference in derivative estimates between + # this iteration and the last. This is typically conservative because if + # convergence has begin, the true error is much closer to the difference + # between the current estimate and the *next* error estimate. However, + # we could use Richarson extrapolation to produce an error estimate that + # is one order higher, and take the difference between that and + # `work.df` (which would just be constant factor that depends on `fac`.) + work.error = xp.abs(work.df - work.df_last) + + def check_termination(work): + """Terminate due to convergence, non-finite values, or error increase""" + stop = xp.astype(xp.zeros_like(work.df), xp.bool) + + i = work.error < work.atol + work.rtol*abs(work.df) + work.status[i] = eim._ECONVERGED + stop[i] = True + + if work.nit > 0: + i = ~((xp.isfinite(work.x) & xp.isfinite(work.df)) | stop) + work.df[i], work.status[i] = xp.nan, eim._EVALUEERR + stop[i] = True + + # With infinite precision, there is a step size below which + # all smaller step sizes will reduce the error. But in floating point + # arithmetic, catastrophic cancellation will begin to cause the error + # to increase again. This heuristic tries to avoid step sizes that are + # too small. There may be more theoretically sound approaches for + # detecting a step size that minimizes the total error, but this + # heuristic seems simple and effective. + i = (work.error > work.error_last*10) & ~stop + work.status[i] = _EERRORINCREASE + stop[i] = True + + return stop + + def post_termination_check(work): + return + + def customize_result(res, shape): + return shape + + return eim._loop(work, callback, shape, maxiter, func, args, dtype, + pre_func_eval, post_func_eval, check_termination, + post_termination_check, customize_result, res_work_pairs, + xp, preserve_shape) + + +def _derivative_weights(work, n, xp): + # This produces the weights of the finite difference formula for a given + # stencil. In experiments, use of a second-order central difference formula + # with Richardson extrapolation was more accurate numerically, but it was + # more complicated, and it would have become even more complicated when + # adding support for one-sided differences. However, now that all the + # function evaluation values are stored, they can be processed in whatever + # way is desired to produce the derivative estimate. We leave alternative + # approaches to future work. 
To be more self-contained, here is the theory + # for deriving the weights below. + # + # Recall that the Taylor expansion of a univariate, scalar-values function + # about a point `x` may be expressed as: + # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3) + # Suppose we evaluate f(x), f(x+h), and f(x-h). We have: + # f(x) = f(x) + # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3) + # f(x - h) = f(x) - f'(x)*h + f''(x)/2!*h**2 + O(h**3) + # We can solve for weights `wi` such that: + # w1*f(x) = w1*(f(x)) + # + w2*f(x + h) = w2*(f(x) + f'(x)*h + f''(x)/2!*h**2) + O(h**3) + # + w3*f(x - h) = w3*(f(x) - f'(x)*h + f''(x)/2!*h**2) + O(h**3) + # = 0 + f'(x)*h + 0 + O(h**3) + # Then + # f'(x) ~ (w1*f(x) + w2*f(x+h) + w3*f(x-h))/h + # is a finite difference derivative approximation with error O(h**2), + # and so it is said to be a "second-order" approximation. Under certain + # conditions (e.g. well-behaved function, `h` sufficiently small), the + # error in the approximation will decrease with h**2; that is, if `h` is + # reduced by a factor of 2, the error is reduced by a factor of 4. + # + # By default, we use eighth-order formulae. Our central-difference formula + # uses abscissae: + # x-h/c**3, x-h/c**2, x-h/c, x-h, x, x+h, x+h/c, x+h/c**2, x+h/c**3 + # where `c` is the step factor. (Typically, the step factor is greater than + # one, so the outermost points - as written above - are actually closest to + # `x`.) This "stencil" is chosen so that each iteration, the step can be + # reduced by the factor `c`, and most of the function evaluations can be + # reused with the new step size. For example, in the next iteration, we + # will have: + # x-h/c**4, x-h/c**3, x-h/c**2, x-h/c, x, x+h/c, x+h/c**2, x+h/c**3, x+h/c**4 + # We do not reuse `x-h` and `x+h` for the new derivative estimate. + # While this would increase the order of the formula and thus the + # theoretical convergence rate, it is also less stable numerically. + # (As noted above, there are other ways of processing the values that are + # more stable. Thus, even now we store `f(x-h)` and `f(x+h)` in `work.fs` + # to simplify future development of this sort of improvement.) + # + # The (right) one-sided formula is produced similarly using abscissae + # x, x+h, x+h/d, x+h/d**2, ..., x+h/d**6, x+h/d**7, x+h/d**7 + # where `d` is the square root of `c`. (The left one-sided formula simply + # uses -h.) When the step size is reduced by factor `c = d**2`, we have + # abscissae: + # x, x+h/d**2, x+h/d**3..., x+h/d**8, x+h/d**9, x+h/d**9 + # `d` is chosen as the square root of `c` so that the rate of the step-size + # reduction is the same per iteration as in the central difference case. + # Note that because the central difference formulas are inherently of even + # order, for simplicity, we use only even-order formulas for one-sided + # differences, too. + + # It's possible for the user to specify `fac` in, say, double precision but + # `x` and `args` in single precision. `fac` gets converted to single + # precision, but we should always use double precision for the intermediate + # calculations here to avoid additional error in the weights. + fac = float(work.fac) + + # Note that if the user switches back to floating point precision with + # `x` and `args`, then `fac` will not necessarily equal the (lower + # precision) cached `_derivative_weights.fac`, and the weights will + # need to be recalculated. This could be fixed, but it's late, and of + # low consequence. 
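+    # As a concrete instance of the derivation above: for `order=2` (n=1), the
+    # central stencil is x-h, x, x+h for any step factor. The normalized
+    # offsets passed to `np.vander` below are [-1, 0, 1], so A = [[1, 1, 1],
+    # [-1, 0, 1], [1, 0, 1]] and b = [0, 1, 0], and the solution [-1/2, 0, 1/2]
+    # reproduces the familiar second-order formula
+    #     f'(x) ~ (f(x+h) - f(x-h)) / (2h).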
+ + diff_state = work.diff_state + if fac != diff_state.fac: + diff_state.central = [] + diff_state.right = [] + diff_state.fac = fac + + if len(diff_state.central) != 2*n + 1: + # Central difference weights. Consider refactoring this; it could + # probably be more compact. + # Note: Using NumPy here is OK; we convert to xp-type at the end + i = np.arange(-n, n + 1) + p = np.abs(i) - 1. # center point has power `p` -1, but sign `s` is 0 + s = np.sign(i) + + h = s / fac ** p + A = np.vander(h, increasing=True).T + b = np.zeros(2*n + 1) + b[1] = 1 + weights = np.linalg.solve(A, b) + + # Enforce identities to improve accuracy + weights[n] = 0 + for i in range(n): + weights[-i-1] = -weights[i] + + # Cache the weights. We only need to calculate them once unless + # the step factor changes. + diff_state.central = weights + + # One-sided difference weights. The left one-sided weights (with + # negative steps) are simply the negative of the right one-sided + # weights, so no need to compute them separately. + i = np.arange(2*n + 1) + p = i - 1. + s = np.sign(i) + + h = s / np.sqrt(fac) ** p + A = np.vander(h, increasing=True).T + b = np.zeros(2 * n + 1) + b[1] = 1 + weights = np.linalg.solve(A, b) + + diff_state.right = weights + + return (xp.asarray(diff_state.central, dtype=work.dtype), + xp.asarray(diff_state.right, dtype=work.dtype)) + + +def jacobian(f, x, *, tolerances=None, maxiter=10, order=8, initial_step=0.5, + step_factor=2.0, step_direction=0): + r"""Evaluate the Jacobian of a function numerically. + + Parameters + ---------- + f : callable + The function whose Jacobian is desired. The signature must be:: + + f(xi: ndarray) -> ndarray + + where each element of ``xi`` is a finite real. If the function to be + differentiated accepts additional arguments, wrap it (e.g. using + `functools.partial` or ``lambda``) and pass the wrapped callable + into `jacobian`. `f` must not mutate the array ``xi``. See Notes + regarding vectorization and the dimensionality of the input and output. + x : float array_like + Points at which to evaluate the Jacobian. Must have at least one dimension. + See Notes regarding the dimensionality and vectorization. + tolerances : dictionary of floats, optional + Absolute and relative tolerances. Valid keys of the dictionary are: + + - ``atol`` - absolute tolerance on the derivative + - ``rtol`` - relative tolerance on the derivative + + Iteration will stop when ``res.error < atol + rtol * abs(res.df)``. The default + `atol` is the smallest normal number of the appropriate dtype, and + the default `rtol` is the square root of the precision of the + appropriate dtype. + maxiter : int, default: 10 + The maximum number of iterations of the algorithm to perform. See + Notes. + order : int, default: 8 + The (positive integer) order of the finite difference formula to be + used. Odd integers will be rounded up to the next even integer. + initial_step : float array_like, default: 0.5 + The (absolute) initial step size for the finite difference derivative + approximation. Must be broadcastable with `x` and `step_direction`. + step_factor : float, default: 2.0 + The factor by which the step size is *reduced* in each iteration; i.e. + the step size in iteration 1 is ``initial_step/step_factor``. If + ``step_factor < 1``, subsequent steps will be greater than the initial + step; this may be useful if steps smaller than some threshold are + undesirable (e.g. due to subtractive cancellation error). 
+ step_direction : integer array_like + An array representing the direction of the finite difference steps (e.g. + for use when `x` lies near to the boundary of the domain of the function.) + Must be broadcastable with `x` and `initial_step`. + Where 0 (default), central differences are used; where negative (e.g. + -1), steps are non-positive; and where positive (e.g. 1), all steps are + non-negative. + + Returns + ------- + res : _RichResult + An object similar to an instance of `scipy.optimize.OptimizeResult` with the + following attributes. The descriptions are written as though the values will + be scalars; however, if `f` returns an array, the outputs will be + arrays of the same shape. + + success : bool array + ``True`` where the algorithm terminated successfully (status ``0``); + ``False`` otherwise. + status : int array + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm converged to the specified tolerances. + - ``-1`` : The error estimate increased, so iteration was terminated. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + + df : float array + The Jacobian of `f` at `x`, if the algorithm terminated + successfully. + error : float array + An estimate of the error: the magnitude of the difference between + the current estimate of the Jacobian and the estimate in the + previous iteration. + nit : int array + The number of iterations of the algorithm that were performed. + nfev : int array + The number of points at which `f` was evaluated. + + Each element of an attribute is associated with the corresponding + element of `df`. For instance, element ``i`` of `nfev` is the + number of points at which `f` was evaluated for the sake of + computing element ``i`` of `df`. + + See Also + -------- + derivative, hessian + + Notes + ----- + Suppose we wish to evaluate the Jacobian of a function + :math:`f: \mathbf{R}^m \rightarrow \mathbf{R}^n`. Assign to variables + ``m`` and ``n`` the positive integer values of :math:`m` and :math:`n`, + respectively, and let ``...`` represent an arbitrary tuple of integers. + If we wish to evaluate the Jacobian at a single point, then: + + - argument `x` must be an array of shape ``(m,)`` + - argument `f` must be vectorized to accept an array of shape ``(m, ...)``. + The first axis represents the :math:`m` inputs of :math:`f`; the remainder + are for evaluating the function at multiple points in a single call. + - argument `f` must return an array of shape ``(n, ...)``. The first + axis represents the :math:`n` outputs of :math:`f`; the remainder + are for the result of evaluating the function at multiple points. + - attribute ``df`` of the result object will be an array of shape ``(n, m)``, + the Jacobian. + + This function is also vectorized in the sense that the Jacobian can be + evaluated at ``k`` points in a single call. In this case, `x` would be an + array of shape ``(m, k)``, `f` would accept an array of shape + ``(m, k, ...)`` and return an array of shape ``(n, k, ...)``, and the ``df`` + attribute of the result would have shape ``(n, m, k)``. + + Suppose the desired callable ``f_not_vectorized`` is not vectorized; it can + only accept an array of shape ``(m,)``. 
A simple solution to satisfy the required + interface is to wrap ``f_not_vectorized`` as follows:: + + def f(x): + return np.apply_along_axis(f_not_vectorized, axis=0, arr=x) + + Alternatively, suppose the desired callable ``f_vec_q`` is vectorized, but + only for 2-D arrays of shape ``(m, q)``. To satisfy the required interface, + consider:: + + def f(x): + m, batch = x.shape[0], x.shape[1:] # x.shape is (m, ...) + x = np.reshape(x, (m, -1)) # `-1` is short for q = prod(batch) + res = f_vec_q(x) # pass shape (m, q) to function + n = res.shape[0] + return np.reshape(res, (n,) + batch) # return shape (n, ...) + + Then pass the wrapped callable ``f`` as the first argument of `jacobian`. + + References + ---------- + .. [1] Jacobian matrix and determinant, *Wikipedia*, + https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant + + Examples + -------- + The Rosenbrock function maps from :math:`\mathbf{R}^m \rightarrow \mathbf{R}`; + the SciPy implementation `scipy.optimize.rosen` is vectorized to accept an + array of shape ``(m, p)`` and return an array of shape ``p``. Suppose we wish + to evaluate the Jacobian (AKA the gradient because the function returns a scalar) + at ``[0.5, 0.5, 0.5]``. + + >>> import numpy as np + >>> from scipy.differentiate import jacobian + >>> from scipy.optimize import rosen, rosen_der + >>> m = 3 + >>> x = np.full(m, 0.5) + >>> res = jacobian(rosen, x) + >>> ref = rosen_der(x) # reference value of the gradient + >>> res.df, ref + (array([-51., -1., 50.]), array([-51., -1., 50.])) + + As an example of a function with multiple outputs, consider Example 4 + from [1]_. + + >>> def f(x): + ... x1, x2, x3 = x + ... return [x1, 5*x3, 4*x2**2 - 2*x3, x3*np.sin(x1)] + + The true Jacobian is given by: + + >>> def df(x): + ... x1, x2, x3 = x + ... one = np.ones_like(x1) + ... return [[one, 0*one, 0*one], + ... [0*one, 0*one, 5*one], + ... [0*one, 8*x2, -2*one], + ... [x3*np.cos(x1), 0*one, np.sin(x1)]] + + Evaluate the Jacobian at an arbitrary point. + + >>> rng = np.random.default_rng(389252938452) + >>> x = rng.random(size=3) + >>> res = jacobian(f, x) + >>> ref = df(x) + >>> res.df.shape == (4, 3) + True + >>> np.allclose(res.df, ref) + True + + Evaluate the Jacobian at 10 arbitrary points in a single call. + + >>> x = rng.random(size=(3, 10)) + >>> res = jacobian(f, x) + >>> ref = df(x) + >>> res.df.shape == (4, 3, 10) + True + >>> np.allclose(res.df, ref) + True + + """ + xp = array_namespace(x) + x = xp.asarray(x) + int_dtype = xp.isdtype(x.dtype, 'integral') + x0 = xp.asarray(x, dtype=xp.asarray(1.0).dtype) if int_dtype else x + + if x0.ndim < 1: + message = "Argument `x` must be at least 1-D." + raise ValueError(message) + + m = x0.shape[0] + i = xp.arange(m) + + def wrapped(x): + p = () if x.ndim == x0.ndim else (x.shape[-1],) # number of abscissae + + new_shape = (m, m) + x0.shape[1:] + p + xph = xp.expand_dims(x0, axis=1) + if x.ndim != x0.ndim: + xph = xp.expand_dims(xph, axis=-1) + xph = xp_copy(xp.broadcast_to(xph, new_shape), xp=xp) + xph[i, i] = x + return f(xph) + + res = derivative(wrapped, x, tolerances=tolerances, + maxiter=maxiter, order=order, initial_step=initial_step, + step_factor=step_factor, preserve_shape=True, + step_direction=step_direction) + + del res.x # the user knows `x`, and the way it gets broadcasted is meaningless here + return res + + +def hessian(f, x, *, tolerances=None, maxiter=10, + order=8, initial_step=0.5, step_factor=2.0): + r"""Evaluate the Hessian of a function numerically. 
+ + Parameters + ---------- + f : callable + The function whose Hessian is desired. The signature must be:: + + f(xi: ndarray) -> ndarray + + where each element of ``xi`` is a finite real. If the function to be + differentiated accepts additional arguments, wrap it (e.g. using + `functools.partial` or ``lambda``) and pass the wrapped callable + into `hessian`. `f` must not mutate the array ``xi``. See Notes + regarding vectorization and the dimensionality of the input and output. + x : float array_like + Points at which to evaluate the Hessian. Must have at least one dimension. + See Notes regarding the dimensionality and vectorization. + tolerances : dictionary of floats, optional + Absolute and relative tolerances. Valid keys of the dictionary are: + + - ``atol`` - absolute tolerance on the derivative + - ``rtol`` - relative tolerance on the derivative + + Iteration will stop when ``res.error < atol + rtol * abs(res.df)``. The default + `atol` is the smallest normal number of the appropriate dtype, and + the default `rtol` is the square root of the precision of the + appropriate dtype. + order : int, default: 8 + The (positive integer) order of the finite difference formula to be + used. Odd integers will be rounded up to the next even integer. + initial_step : float, default: 0.5 + The (absolute) initial step size for the finite difference derivative + approximation. + step_factor : float, default: 2.0 + The factor by which the step size is *reduced* in each iteration; i.e. + the step size in iteration 1 is ``initial_step/step_factor``. If + ``step_factor < 1``, subsequent steps will be greater than the initial + step; this may be useful if steps smaller than some threshold are + undesirable (e.g. due to subtractive cancellation error). + maxiter : int, default: 10 + The maximum number of iterations of the algorithm to perform. See + Notes. + + Returns + ------- + res : _RichResult + An object similar to an instance of `scipy.optimize.OptimizeResult` with the + following attributes. The descriptions are written as though the values will + be scalars; however, if `f` returns an array, the outputs will be + arrays of the same shape. + + success : bool array + ``True`` where the algorithm terminated successfully (status ``0``); + ``False`` otherwise. + status : int array + An integer representing the exit status of the algorithm. + + - ``0`` : The algorithm converged to the specified tolerances. + - ``-1`` : The error estimate increased, so iteration was terminated. + - ``-2`` : The maximum number of iterations was reached. + - ``-3`` : A non-finite value was encountered. + + ddf : float array + The Hessian of `f` at `x`, if the algorithm terminated + successfully. + error : float array + An estimate of the error: the magnitude of the difference between + the current estimate of the Hessian and the estimate in the + previous iteration. + nfev : int array + The number of points at which `f` was evaluated. + + Each element of an attribute is associated with the corresponding + element of `ddf`. For instance, element ``[i, j]`` of `nfev` is the + number of points at which `f` was evaluated for the sake of + computing element ``[i, j]`` of `ddf`. + + See Also + -------- + derivative, jacobian + + Notes + ----- + Suppose we wish to evaluate the Hessian of a function + :math:`f: \mathbf{R}^m \rightarrow \mathbf{R}`, and we assign to variable + ``m`` the positive integer value of :math:`m`. 
If we wish to evaluate + the Hessian at a single point, then: + + - argument `x` must be an array of shape ``(m,)`` + - argument `f` must be vectorized to accept an array of shape + ``(m, ...)``. The first axis represents the :math:`m` inputs of + :math:`f`; the remaining axes indicated by ellipses are for evaluating + the function at several abscissae in a single call. + - argument `f` must return an array of shape ``(...)``. + - attribute ``dff`` of the result object will be an array of shape ``(m, m)``, + the Hessian. + + This function is also vectorized in the sense that the Hessian can be + evaluated at ``k`` points in a single call. In this case, `x` would be an + array of shape ``(m, k)``, `f` would accept an array of shape + ``(m, ...)`` and return an array of shape ``(...)``, and the ``ddf`` + attribute of the result would have shape ``(m, m, k)``. Note that the + axis associated with the ``k`` points is included within the axes + denoted by ``(...)``. + + Currently, `hessian` is implemented by nesting calls to `jacobian`. + All options passed to `hessian` are used for both the inner and outer + calls with one exception: the `rtol` used in the inner `jacobian` call + is tightened by a factor of 100 with the expectation that the inner + error can be ignored. A consequence is that `rtol` should not be set + less than 100 times the precision of the dtype of `x`; a warning is + emitted otherwise. + + References + ---------- + .. [1] Hessian matrix, *Wikipedia*, + https://en.wikipedia.org/wiki/Hessian_matrix + + Examples + -------- + The Rosenbrock function maps from :math:`\mathbf{R}^m \rightarrow \mathbf{R}`; + the SciPy implementation `scipy.optimize.rosen` is vectorized to accept an + array of shape ``(m, ...)`` and return an array of shape ``...``. Suppose we + wish to evaluate the Hessian at ``[0.5, 0.5, 0.5]``. + + >>> import numpy as np + >>> from scipy.differentiate import hessian + >>> from scipy.optimize import rosen, rosen_hess + >>> m = 3 + >>> x = np.full(m, 0.5) + >>> res = hessian(rosen, x) + >>> ref = rosen_hess(x) # reference value of the Hessian + >>> np.allclose(res.ddf, ref) + True + + `hessian` is vectorized to evaluate the Hessian at multiple points + in a single call. + + >>> rng = np.random.default_rng(4589245925010) + >>> x = rng.random((m, 10)) + >>> res = hessian(rosen, x) + >>> ref = [rosen_hess(xi) for xi in x.T] + >>> ref = np.moveaxis(ref, 0, -1) + >>> np.allclose(res.ddf, ref) + True + + """ + # todo: + # - add ability to vectorize over additional parameters (*args?) 
+ # - error estimate stack with inner jacobian (or use legit 2D stencil) + + kwargs = dict(maxiter=maxiter, order=order, initial_step=initial_step, + step_factor=step_factor) + tolerances = {} if tolerances is None else tolerances + atol = tolerances.get('atol', None) + rtol = tolerances.get('rtol', None) + + xp = array_namespace(x) + x = xp.asarray(x) + dtype = x.dtype if not xp.isdtype(x.dtype, 'integral') else xp.asarray(1.).dtype + finfo = xp.finfo(dtype) + rtol = finfo.eps**0.5 if rtol is None else rtol # keep same as `derivative` + + # tighten the inner tolerance to make the inner error negligible + rtol_min = finfo.eps * 100 + message = (f"The specified `{rtol=}`, but error estimates are likely to be " + f"unreliable when `rtol < {rtol_min}`.") + if 0 < rtol < rtol_min: # rtol <= 0 is an error + warnings.warn(message, RuntimeWarning, stacklevel=2) + rtol = rtol_min + + def df(x): + tolerances = dict(rtol=rtol/100, atol=atol) + temp = jacobian(f, x, tolerances=tolerances, **kwargs) + nfev.append(temp.nfev if len(nfev) == 0 else temp.nfev.sum(axis=-1)) + return temp.df + + nfev = [] # track inner function evaluations + res = jacobian(df, x, tolerances=tolerances, **kwargs) # jacobian of jacobian + + nfev = xp.cumulative_sum(xp.stack(nfev), axis=0) + res_nit = xp.astype(res.nit[xp.newaxis, ...], xp.int64) # appease torch + res.nfev = xp_take_along_axis(nfev, res_nit, axis=0)[0] + res.ddf = res.df + del res.df # this is renamed to ddf + del res.nit # this is only the outer-jacobian nit + + return res diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e57fbe3cc5ae290f66216e92dec4b2c71474d1e5 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/test_differentiate.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/test_differentiate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f629f993def87b3fbce204a64c0246f5a90d7996 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/test_differentiate.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/test_differentiate.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/test_differentiate.py new file mode 100644 index 0000000000000000000000000000000000000000..64bc8193cc237465e9427300bedfac8712963e4c --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/test_differentiate.py @@ -0,0 +1,695 @@ +import math +import pytest + +import numpy as np + +from scipy.conftest import array_api_compatible +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._array_api_no_0d import xp_assert_close, xp_assert_equal, xp_assert_less +from scipy._lib._array_api import is_numpy, is_torch, array_namespace + +from 
scipy import stats, optimize, special +from scipy.differentiate import derivative, jacobian, hessian +from scipy.differentiate._differentiate import _EERRORINCREASE + + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")] + +array_api_strict_skip_reason = 'Array API does not support fancy indexing assignment.' +jax_skip_reason = 'JAX arrays do not support item assignment.' + + +@pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason) +@pytest.mark.skip_xp_backends('jax.numpy',reason=jax_skip_reason) +class TestDerivative: + + def f(self, x): + return special.ndtr(x) + + @pytest.mark.parametrize('x', [0.6, np.linspace(-0.05, 1.05, 10)]) + def test_basic(self, x, xp): + # Invert distribution CDF and compare against distribution `ppf` + default_dtype = xp.asarray(1.).dtype + res = derivative(self.f, xp.asarray(x, dtype=default_dtype)) + ref = xp.asarray(stats.norm().pdf(x), dtype=default_dtype) + xp_assert_close(res.df, ref) + # This would be nice, but doesn't always work out. `error` is an + # estimate, not a bound. + if not is_torch(xp): + xp_assert_less(xp.abs(res.df - ref), res.error) + + @pytest.mark.skip_xp_backends(np_only=True) + @pytest.mark.parametrize('case', stats._distr_params.distcont) + def test_accuracy(self, case): + distname, params = case + dist = getattr(stats, distname)(*params) + x = dist.median() + 0.1 + res = derivative(dist.cdf, x) + ref = dist.pdf(x) + xp_assert_close(res.df, ref, atol=1e-10) + + @pytest.mark.parametrize('order', [1, 6]) + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, order, shape, xp): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + x = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 + n = np.size(x) + state = {} + + @np.vectorize + def _derivative_single(x): + return derivative(self.f, x, order=order) + + def f(x, *args, **kwargs): + state['nit'] += 1 + state['feval'] += 1 if (x.size == n or x.ndim <=1) else x.shape[-1] + return self.f(x, *args, **kwargs) + + state['nit'] = -1 + state['feval'] = 0 + + res = derivative(f, xp.asarray(x, dtype=xp.float64), order=order) + refs = _derivative_single(x).ravel() + + ref_x = [ref.x for ref in refs] + xp_assert_close(xp.reshape(res.x, (-1,)), xp.asarray(ref_x)) + + ref_df = [ref.df for ref in refs] + xp_assert_close(xp.reshape(res.df, (-1,)), xp.asarray(ref_df)) + + ref_error = [ref.error for ref in refs] + xp_assert_close(xp.reshape(res.error, (-1,)), xp.asarray(ref_error), + atol=1e-12) + + ref_success = [bool(ref.success) for ref in refs] + xp_assert_equal(xp.reshape(res.success, (-1,)), xp.asarray(ref_success)) + + ref_flag = [np.int32(ref.status) for ref in refs] + xp_assert_equal(xp.reshape(res.status, (-1,)), xp.asarray(ref_flag)) + + ref_nfev = [np.int32(ref.nfev) for ref in refs] + xp_assert_equal(xp.reshape(res.nfev, (-1,)), xp.asarray(ref_nfev)) + if is_numpy(xp): # can't expect other backends to be exactly the same + assert xp.max(res.nfev) == state['feval'] + + ref_nit = [np.int32(ref.nit) for ref in refs] + xp_assert_equal(xp.reshape(res.nit, (-1,)), xp.asarray(ref_nit)) + if is_numpy(xp): # can't expect other backends to be exactly the same + assert xp.max(res.nit) == state['nit'] + + def test_flags(self, xp): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. 
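+        # One function is constructed per status: the linear function converges
+        # within tolerance; fresh random rescaling makes the second function's
+        # successive derivative estimates inconsistent, so the error estimate
+        # grows; with `order=2` and `rtol=1e-14`, `exp` cannot converge within
+        # the default `maxiter`; and the NaN output trips the non-finite check.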
+ rng = np.random.default_rng(5651219684984213) + def f(xs, js): + f.nit += 1 + funcs = [lambda x: x - 2.5, # converges + lambda x: xp.exp(x)*rng.random(), # error increases + lambda x: xp.exp(x), # reaches maxiter due to order=2 + lambda x: xp.full_like(x, xp.nan)] # stops due to NaN + res = [funcs[int(j)](x) for x, j in zip(xs, xp.reshape(js, (-1,)))] + return xp.stack(res) + f.nit = 0 + + args = (xp.arange(4, dtype=xp.int64),) + res = derivative(f, xp.ones(4, dtype=xp.float64), + tolerances=dict(rtol=1e-14), + order=2, args=args) + + ref_flags = xp.asarray([eim._ECONVERGED, + _EERRORINCREASE, + eim._ECONVERR, + eim._EVALUEERR], dtype=xp.int32) + xp_assert_equal(res.status, ref_flags) + + def test_flags_preserve_shape(self, xp): + # Same test as above but using `preserve_shape` option to simplify. + rng = np.random.default_rng(5651219684984213) + def f(x): + out = [x - 2.5, # converges + xp.exp(x)*rng.random(), # error increases + xp.exp(x), # reaches maxiter due to order=2 + xp.full_like(x, xp.nan)] # stops due to NaN + return xp.stack(out) + + res = derivative(f, xp.asarray(1, dtype=xp.float64), + tolerances=dict(rtol=1e-14), + order=2, preserve_shape=True) + + ref_flags = xp.asarray([eim._ECONVERGED, + _EERRORINCREASE, + eim._ECONVERR, + eim._EVALUEERR], dtype=xp.int32) + xp_assert_equal(res.status, ref_flags) + + def test_preserve_shape(self, xp): + # Test `preserve_shape` option + def f(x): + out = [x, xp.sin(3*x), x+xp.sin(10*x), xp.sin(20*x)*(x-1)**2] + return xp.stack(out) + + x = xp.asarray(0.) + ref = xp.asarray([xp.asarray(1), 3*xp.cos(3*x), 1+10*xp.cos(10*x), + 20*xp.cos(20*x)*(x-1)**2 + 2*xp.sin(20*x)*(x-1)]) + res = derivative(f, x, preserve_shape=True) + xp_assert_close(res.df, ref) + + def test_convergence(self, xp): + # Test that the convergence tolerances behave as expected + x = xp.asarray(1., dtype=xp.float64) + f = special.ndtr + ref = float(stats.norm.pdf(1.)) + tolerances0 = dict(atol=0, rtol=0) + + tolerances = tolerances0.copy() + tolerances['atol'] = 1e-3 + res1 = derivative(f, x, tolerances=tolerances, order=4) + assert abs(res1.df - ref) < 1e-3 + tolerances['atol'] = 1e-6 + res2 = derivative(f, x, tolerances=tolerances, order=4) + assert abs(res2.df - ref) < 1e-6 + assert abs(res2.df - ref) < abs(res1.df - ref) + + tolerances = tolerances0.copy() + tolerances['rtol'] = 1e-3 + res1 = derivative(f, x, tolerances=tolerances, order=4) + assert abs(res1.df - ref) < 1e-3 * ref + tolerances['rtol'] = 1e-6 + res2 = derivative(f, x, tolerances=tolerances, order=4) + assert abs(res2.df - ref) < 1e-6 * ref + assert abs(res2.df - ref) < abs(res1.df - ref) + + def test_step_parameters(self, xp): + # Test that step factors have the expected effect on accuracy + x = xp.asarray(1., dtype=xp.float64) + f = special.ndtr + ref = float(stats.norm.pdf(1.)) + + res1 = derivative(f, x, initial_step=0.5, maxiter=1) + res2 = derivative(f, x, initial_step=0.05, maxiter=1) + assert abs(res2.df - ref) < abs(res1.df - ref) + + res1 = derivative(f, x, step_factor=2, maxiter=1) + res2 = derivative(f, x, step_factor=20, maxiter=1) + assert abs(res2.df - ref) < abs(res1.df - ref) + + # `step_factor` can be less than 1: `initial_step` is the minimum step + kwargs = dict(order=4, maxiter=1, step_direction=0) + res = derivative(f, x, initial_step=0.5, step_factor=0.5, **kwargs) + ref = derivative(f, x, initial_step=1, step_factor=2, **kwargs) + xp_assert_close(res.df, ref.df, rtol=5e-15) + + # This is a similar test for one-sided difference + kwargs = dict(order=2, maxiter=1, step_direction=1) + 
res = derivative(f, x, initial_step=1, step_factor=2, **kwargs) + ref = derivative(f, x, initial_step=1/np.sqrt(2), step_factor=0.5, **kwargs) + xp_assert_close(res.df, ref.df, rtol=5e-15) + + kwargs['step_direction'] = -1 + res = derivative(f, x, initial_step=1, step_factor=2, **kwargs) + ref = derivative(f, x, initial_step=1/np.sqrt(2), step_factor=0.5, **kwargs) + xp_assert_close(res.df, ref.df, rtol=5e-15) + + def test_step_direction(self, xp): + # test that `step_direction` works as expected + def f(x): + y = xp.exp(x) + y[(x < 0) + (x > 2)] = xp.nan + return y + + x = xp.linspace(0, 2, 10) + step_direction = xp.zeros_like(x) + step_direction[x < 0.6], step_direction[x > 1.4] = 1, -1 + res = derivative(f, x, step_direction=step_direction) + xp_assert_close(res.df, xp.exp(x)) + assert xp.all(res.success) + + def test_vectorized_step_direction_args(self, xp): + # test that `step_direction` and `args` are vectorized properly + def f(x, p): + return x ** p + + def df(x, p): + return p * x ** (p - 1) + + x = xp.reshape(xp.asarray([1, 2, 3, 4]), (-1, 1, 1)) + hdir = xp.reshape(xp.asarray([-1, 0, 1]), (1, -1, 1)) + p = xp.reshape(xp.asarray([2, 3]), (1, 1, -1)) + res = derivative(f, x, step_direction=hdir, args=(p,)) + ref = xp.broadcast_to(df(x, p), res.df.shape) + ref = xp.asarray(ref, dtype=xp.asarray(1.).dtype) + xp_assert_close(res.df, ref) + + def test_initial_step(self, xp): + # Test that `initial_step` works as expected and is vectorized + def f(x): + return xp.exp(x) + + x = xp.asarray(0., dtype=xp.float64) + step_direction = xp.asarray([-1, 0, 1]) + h0 = xp.reshape(xp.logspace(-3, 0, 10), (-1, 1)) + res = derivative(f, x, initial_step=h0, order=2, maxiter=1, + step_direction=step_direction) + err = xp.abs(res.df - f(x)) + + # error should be smaller for smaller step sizes + assert xp.all(err[:-1, ...] 
< err[1:, ...]) + + # results of vectorized call should match results with + # initial_step taken one at a time + for i in range(h0.shape[0]): + ref = derivative(f, x, initial_step=h0[i, 0], order=2, maxiter=1, + step_direction=step_direction) + xp_assert_close(res.df[i, :], ref.df, rtol=1e-14) + + def test_maxiter_callback(self, xp): + # Test behavior of `maxiter` parameter and `callback` interface + x = xp.asarray(0.612814, dtype=xp.float64) + maxiter = 3 + + def f(x): + res = special.ndtr(x) + return res + + default_order = 8 + res = derivative(f, x, maxiter=maxiter, tolerances=dict(rtol=1e-15)) + assert not xp.any(res.success) + assert xp.all(res.nfev == default_order + 1 + (maxiter - 1)*2) + assert xp.all(res.nit == maxiter) + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'x') + assert float(res.df) not in callback.dfs + callback.dfs.add(float(res.df)) + assert res.status == eim._EINPROGRESS + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + callback.dfs = set() + + res2 = derivative(f, x, callback=callback, tolerances=dict(rtol=1e-15)) + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key == 'status': + assert res[key] == eim._ECONVERR + assert res2[key] == eim._ECALLBACK + else: + assert res2[key] == callback.res[key] == res[key] + + @pytest.mark.parametrize("hdir", (-1, 0, 1)) + @pytest.mark.parametrize("x", (0.65, [0.65, 0.7])) + @pytest.mark.parametrize("dtype", ('float16', 'float32', 'float64')) + def test_dtype(self, hdir, x, dtype, xp): + if dtype == 'float16' and not is_numpy(xp): + pytest.skip('float16 not tested for alternative backends') + + # Test that dtypes are preserved + dtype = getattr(xp, dtype) + x = xp.asarray(x, dtype=dtype) + + def f(x): + assert x.dtype == dtype + return xp.exp(x) + + def callback(res): + assert res.x.dtype == dtype + assert res.df.dtype == dtype + assert res.error.dtype == dtype + + res = derivative(f, x, order=4, step_direction=hdir, callback=callback) + assert res.x.dtype == dtype + assert res.df.dtype == dtype + assert res.error.dtype == dtype + eps = xp.finfo(dtype).eps + # not sure why torch is less accurate here; might be worth investigating + rtol = eps**0.5 * 50 if is_torch(xp) else eps**0.5 + xp_assert_close(res.df, xp.exp(res.x), rtol=rtol) + + def test_input_validation(self, xp): + # Test input validation for appropriate error messages + one = xp.asarray(1) + + message = '`f` must be callable.' + with pytest.raises(ValueError, match=message): + derivative(None, one) + + message = 'Abscissae and function output must be real numbers.' + with pytest.raises(ValueError, match=message): + derivative(lambda x: x, xp.asarray(-4+1j)) + + message = "When `preserve_shape=False`, the shape of the array..." + with pytest.raises(ValueError, match=message): + derivative(lambda x: [1, 2, 3], xp.asarray([-2, -3])) + + message = 'Tolerances and step parameters must be non-negative...' + with pytest.raises(ValueError, match=message): + derivative(lambda x: x, one, tolerances=dict(atol=-1)) + with pytest.raises(ValueError, match=message): + derivative(lambda x: x, one, tolerances=dict(rtol='ekki')) + with pytest.raises(ValueError, match=message): + derivative(lambda x: x, one, step_factor=object()) + + message = '`maxiter` must be a positive integer.' 
+ with pytest.raises(ValueError, match=message): + derivative(lambda x: x, one, maxiter=1.5) + with pytest.raises(ValueError, match=message): + derivative(lambda x: x, one, maxiter=0) + + message = '`order` must be a positive integer' + with pytest.raises(ValueError, match=message): + derivative(lambda x: x, one, order=1.5) + with pytest.raises(ValueError, match=message): + derivative(lambda x: x, one, order=0) + + message = '`preserve_shape` must be True or False.' + with pytest.raises(ValueError, match=message): + derivative(lambda x: x, one, preserve_shape='herring') + + message = '`callback` must be callable.' + with pytest.raises(ValueError, match=message): + derivative(lambda x: x, one, callback='shrubbery') + + def test_special_cases(self, xp): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + xp_test = array_namespace(x) # needs `isdtype` + assert xp_test.isdtype(x.dtype, 'real floating') + return x ** 99 - 1 + + if not is_torch(xp): # torch defaults to float32 + res = derivative(f, xp.asarray(7), tolerances=dict(rtol=1e-10)) + assert res.success + xp_assert_close(res.df, xp.asarray(99*7.**98)) + + # Test invalid step size and direction + res = derivative(xp.exp, xp.asarray(1), step_direction=xp.nan) + xp_assert_equal(res.df, xp.asarray(xp.nan)) + xp_assert_equal(res.status, xp.asarray(-3, dtype=xp.int32)) + + res = derivative(xp.exp, xp.asarray(1), initial_step=0) + xp_assert_equal(res.df, xp.asarray(xp.nan)) + xp_assert_equal(res.status, xp.asarray(-3, dtype=xp.int32)) + + # Test that if success is achieved in the correct number + # of iterations if function is a polynomial. Ideally, all polynomials + # of order 0-2 would get exact result with 0 refinement iterations, + # all polynomials of order 3-4 would be differentiated exactly after + # 1 iteration, etc. However, it seems that `derivative` needs an + # extra iteration to detect convergence based on the error estimate. + + for n in range(6): + x = xp.asarray(1.5, dtype=xp.float64) + def f(x): + return 2*x**n + + ref = 2*n*x**(n-1) + + res = derivative(f, x, maxiter=1, order=max(1, n)) + xp_assert_close(res.df, ref, rtol=1e-15) + xp_assert_equal(res.error, xp.asarray(xp.nan, dtype=xp.float64)) + + res = derivative(f, x, order=max(1, n)) + assert res.success + assert res.nit == 2 + xp_assert_close(res.df, ref, rtol=1e-15) + + # Test scalar `args` (not in tuple) + def f(x, c): + return c*x - 1 + + res = derivative(f, xp.asarray(2), args=xp.asarray(3)) + xp_assert_close(res.df, xp.asarray(3.)) + + # no need to run a test on multiple backends if it's xfailed + @pytest.mark.skip_xp_backends(np_only=True) + @pytest.mark.xfail + @pytest.mark.parametrize("case", ( # function, evaluation point + (lambda x: (x - 1) ** 3, 1), + (lambda x: np.where(x > 1, (x - 1) ** 5, (x - 1) ** 3), 1) + )) + def test_saddle_gh18811(self, case): + # With default settings, `derivative` will not always converge when + # the true derivative is exactly zero. This tests that specifying a + # (tight) `atol` alleviates the problem. See discussion in gh-18811. + atol = 1e-16 + res = derivative(*case, step_direction=[-1, 0, 1], atol=atol) + assert np.all(res.success) + xp_assert_close(res.df, 0, atol=atol) + + +class JacobianHessianTest: + def test_iv(self, xp): + jh_func = self.jh_func.__func__ + + # Test input validation + message = "Argument `x` must be at least 1-D." 
+ with pytest.raises(ValueError, match=message): + jh_func(xp.sin, 1, tolerances=dict(atol=-1)) + + # Confirm that other parameters are being passed to `derivative`, + # which raises an appropriate error message. + x = xp.ones(3) + func = optimize.rosen + message = 'Tolerances and step parameters must be non-negative scalars.' + with pytest.raises(ValueError, match=message): + jh_func(func, x, tolerances=dict(atol=-1)) + with pytest.raises(ValueError, match=message): + jh_func(func, x, tolerances=dict(rtol=-1)) + with pytest.raises(ValueError, match=message): + jh_func(func, x, step_factor=-1) + + message = '`order` must be a positive integer.' + with pytest.raises(ValueError, match=message): + jh_func(func, x, order=-1) + + message = '`maxiter` must be a positive integer.' + with pytest.raises(ValueError, match=message): + jh_func(func, x, maxiter=-1) + + +@pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason) +@pytest.mark.skip_xp_backends('jax.numpy',reason=jax_skip_reason) +class TestJacobian(JacobianHessianTest): + jh_func = jacobian + + # Example functions and Jacobians from Wikipedia: + # https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant#Examples + + def f1(z, xp): + x, y = z + return xp.stack([x ** 2 * y, 5 * x + xp.sin(y)]) + + def df1(z): + x, y = z + return [[2 * x * y, x ** 2], [np.full_like(x, 5), np.cos(y)]] + + f1.mn = 2, 2 # type: ignore[attr-defined] + f1.ref = df1 # type: ignore[attr-defined] + + def f2(z, xp): + r, phi = z + return xp.stack([r * xp.cos(phi), r * xp.sin(phi)]) + + def df2(z): + r, phi = z + return [[np.cos(phi), -r * np.sin(phi)], + [np.sin(phi), r * np.cos(phi)]] + + f2.mn = 2, 2 # type: ignore[attr-defined] + f2.ref = df2 # type: ignore[attr-defined] + + def f3(z, xp): + r, phi, th = z + return xp.stack([r * xp.sin(phi) * xp.cos(th), r * xp.sin(phi) * xp.sin(th), + r * xp.cos(phi)]) + + def df3(z): + r, phi, th = z + return [[np.sin(phi) * np.cos(th), r * np.cos(phi) * np.cos(th), + -r * np.sin(phi) * np.sin(th)], + [np.sin(phi) * np.sin(th), r * np.cos(phi) * np.sin(th), + r * np.sin(phi) * np.cos(th)], + [np.cos(phi), -r * np.sin(phi), np.zeros_like(r)]] + + f3.mn = 3, 3 # type: ignore[attr-defined] + f3.ref = df3 # type: ignore[attr-defined] + + def f4(x, xp): + x1, x2, x3 = x + return xp.stack([x1, 5 * x3, 4 * x2 ** 2 - 2 * x3, x3 * xp.sin(x1)]) + + def df4(x): + x1, x2, x3 = x + one = np.ones_like(x1) + return [[one, 0 * one, 0 * one], + [0 * one, 0 * one, 5 * one], + [0 * one, 8 * x2, -2 * one], + [x3 * np.cos(x1), 0 * one, np.sin(x1)]] + + f4.mn = 3, 4 # type: ignore[attr-defined] + f4.ref = df4 # type: ignore[attr-defined] + + def f5(x, xp): + x1, x2, x3 = x + return xp.stack([5 * x2, 4 * x1 ** 2 - 2 * xp.sin(x2 * x3), x2 * x3]) + + def df5(x): + x1, x2, x3 = x + one = np.ones_like(x1) + return [[0 * one, 5 * one, 0 * one], + [8 * x1, -2 * x3 * np.cos(x2 * x3), -2 * x2 * np.cos(x2 * x3)], + [0 * one, x3, x2]] + + f5.mn = 3, 3 # type: ignore[attr-defined] + f5.ref = df5 # type: ignore[attr-defined] + + def rosen(x, _): return optimize.rosen(x) + rosen.mn = 5, 1 # type: ignore[attr-defined] + rosen.ref = optimize.rosen_der # type: ignore[attr-defined] + + @pytest.mark.parametrize('dtype', ('float32', 'float64')) + @pytest.mark.parametrize('size', [(), (6,), (2, 3)]) + @pytest.mark.parametrize('func', [f1, f2, f3, f4, f5, rosen]) + def test_examples(self, dtype, size, func, xp): + atol = 1e-10 if dtype == 'float64' else 1.99e-3 + dtype = getattr(xp, dtype) + rng = np.random.default_rng(458912319542) + m, 
n = func.mn + x = rng.random(size=(m,) + size) + res = jacobian(lambda x: func(x , xp), xp.asarray(x, dtype=dtype)) + # convert list of arrays to single array before converting to xp array + ref = xp.asarray(np.asarray(func.ref(x)), dtype=dtype) + xp_assert_close(res.df, ref, atol=atol) + + def test_attrs(self, xp): + # Test attributes of result object + z = xp.asarray([0.5, 0.25]) + + # case in which some elements of the Jacobian are harder + # to calculate than others + def df1(z): + x, y = z + return xp.stack([xp.cos(0.5*x) * xp.cos(y), xp.sin(2*x) * y**2]) + + def df1_0xy(x, y): + return xp.cos(0.5*x) * xp.cos(y) + + def df1_1xy(x, y): + return xp.sin(2*x) * y**2 + + res = jacobian(df1, z, initial_step=10) + if is_numpy(xp): + assert len(np.unique(res.nit)) == 4 + assert len(np.unique(res.nfev)) == 4 + + res00 = jacobian(lambda x: df1_0xy(x, z[1]), z[0:1], initial_step=10) + res01 = jacobian(lambda y: df1_0xy(z[0], y), z[1:2], initial_step=10) + res10 = jacobian(lambda x: df1_1xy(x, z[1]), z[0:1], initial_step=10) + res11 = jacobian(lambda y: df1_1xy(z[0], y), z[1:2], initial_step=10) + ref = optimize.OptimizeResult() + for attr in ['success', 'status', 'df', 'nit', 'nfev']: + ref_attr = xp.asarray([[getattr(res00, attr), getattr(res01, attr)], + [getattr(res10, attr), getattr(res11, attr)]]) + ref[attr] = xp.squeeze(ref_attr) + rtol = 1.5e-5 if res[attr].dtype == xp.float32 else 1.5e-14 + xp_assert_close(res[attr], ref[attr], rtol=rtol) + + def test_step_direction_size(self, xp): + # Check that `step_direction` and `initial_step` can be used to ensure that + # the usable domain of a function is respected. + rng = np.random.default_rng(23892589425245) + b = rng.random(3) + eps = 1e-7 # torch needs wiggle room? + + def f(x): + x[0, x[0] < b[0]] = xp.nan + x[0, x[0] > b[0] + 0.25] = xp.nan + x[1, x[1] > b[1]] = xp.nan + x[1, x[1] < b[1] - 0.1-eps] = xp.nan + return TestJacobian.f5(x, xp) + + dir = [1, -1, 0] + h0 = [0.25, 0.1, 0.5] + atol = {'atol': 1e-8} + res = jacobian(f, xp.asarray(b, dtype=xp.float64), initial_step=h0, + step_direction=dir, tolerances=atol) + ref = xp.asarray(TestJacobian.df5(b), dtype=xp.float64) + xp_assert_close(res.df, ref, atol=1e-8) + assert xp.all(xp.isfinite(ref)) + + +@pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason) +@pytest.mark.skip_xp_backends('jax.numpy',reason=jax_skip_reason) +class TestHessian(JacobianHessianTest): + jh_func = hessian + + @pytest.mark.parametrize('shape', [(), (4,), (2, 4)]) + def test_example(self, shape, xp): + rng = np.random.default_rng(458912319542) + m = 3 + x = xp.asarray(rng.random((m,) + shape), dtype=xp.float64) + res = hessian(optimize.rosen, x) + if shape: + x = xp.reshape(x, (m, -1)) + ref = xp.stack([optimize.rosen_hess(xi) for xi in x.T]) + ref = xp.moveaxis(ref, 0, -1) + ref = xp.reshape(ref, (m, m,) + shape) + else: + ref = optimize.rosen_hess(x) + xp_assert_close(res.ddf, ref, atol=1e-8) + + # # Removed symmetry enforcement; consider adding back in as a feature + # # check symmetry + # for key in ['ddf', 'error', 'nfev', 'success', 'status']: + # assert_equal(res[key], np.swapaxes(res[key], 0, 1)) + + def test_float32(self, xp): + rng = np.random.default_rng(458912319542) + x = xp.asarray(rng.random(3), dtype=xp.float32) + res = hessian(optimize.rosen, x) + ref = optimize.rosen_hess(x) + mask = (ref != 0) + xp_assert_close(res.ddf[mask], ref[mask]) + atol = 1e-2 * xp.abs(xp.min(ref[mask])) + xp_assert_close(res.ddf[~mask], ref[~mask], atol=atol) + + def test_nfev(self, xp): + z 
= xp.asarray([0.5, 0.25]) + xp_test = array_namespace(z) + + def f1(z): + x, y = xp_test.broadcast_arrays(*z) + f1.nfev = f1.nfev + (math.prod(x.shape[2:]) if x.ndim > 2 else 1) + return xp.sin(x) * y ** 3 + f1.nfev = 0 + + + res = hessian(f1, z, initial_step=10) + f1.nfev = 0 + res00 = hessian(lambda x: f1([x[0], z[1]]), z[0:1], initial_step=10) + assert res.nfev[0, 0] == f1.nfev == res00.nfev[0, 0] + + f1.nfev = 0 + res11 = hessian(lambda y: f1([z[0], y[0]]), z[1:2], initial_step=10) + assert res.nfev[1, 1] == f1.nfev == res11.nfev[0, 0] + + # Removed symmetry enforcement; consider adding back in as a feature + # assert_equal(res.nfev, res.nfev.T) # check symmetry + # assert np.unique(res.nfev).size == 3 + + + @pytest.mark.thread_unsafe + @pytest.mark.skip_xp_backends(np_only=True, + reason='Python list input uses NumPy backend') + def test_small_rtol_warning(self, xp): + message = 'The specified `rtol=1e-15`, but...' + with pytest.warns(RuntimeWarning, match=message): + hessian(xp.sin, [1.], tolerances=dict(rtol=1e-15)) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de2308d623436f492b7c7659c557b88880d3e7d6 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/_arffread.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/_arffread.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09f28bb166555e4c677e2c07f0e53e368979eb6e Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/_arffread.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff new file mode 100644 index 0000000000000000000000000000000000000000..5766aeb229a1b31378026274c366e8e9e44fd487 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff @@ -0,0 +1,11 @@ +@RELATION iris + +@ATTRIBUTE sepallength REAL +@ATTRIBUTE sepalwidth REAL +@ATTRIBUTE petallength REAL +@ATTRIBUTE petalwidth REAL +@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica} + +@DATA + +% This file has no data diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff new file mode 100644 index 0000000000000000000000000000000000000000..094ac5094a842866666726b358d2c66bf927c9d2 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff @@ -0,0 +1,8 @@ +@relation test9 + +@attribute attr_relational relational + @attribute attr_number integer +@end attr_relational + +@data 
+'0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n100\n101\n102\n103\n104\n105\n106\n107\n108\n109\n110\n111\n112\n113\n114\n115\n116\n117\n118\n119\n120\n121\n122\n123\n124\n125\n126\n127\n128\n129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n151\n152\n153\n154\n155\n156\n157\n158\n159\n160\n161\n162\n163\n164\n165\n166\n167\n168\n169\n170\n171\n172\n173\n174\n175\n176\n177\n178\n179\n180\n181\n182\n183\n184\n185\n186\n187\n188\n189\n190\n191\n192\n193\n194\n195\n196\n197\n198\n199\n200\n201\n202\n203\n204\n205\n206\n207\n208\n209\n210\n211\n212\n213\n214\n215\n216\n217\n218\n219\n220\n221\n222\n223\n224\n225\n226\n227\n228\n229\n230\n231\n232\n233\n234\n235\n236\n237\n238\n239\n240\n241\n242\n243\n244\n245\n246\n247\n248\n249\n250\n251\n252\n253\n254\n255\n256\n257\n258\n259\n260\n261\n262\n263\n264\n265\n266\n267\n268\n269\n270\n271\n272\n273\n274\n275\n276\n277\n278\n279\n280\n281\n282\n283\n284\n285\n286\n287\n288\n289\n290\n291\n292\n293\n294\n295\n296\n297\n298\n299\n300\n301\n302\n303\n304\n305\n306\n307\n308\n309\n310\n311\n312\n313\n314\n315\n316\n317\n318\n319\n320\n321\n322\n323\n324\n325\n326\n327\n328\n329\n330\n331\n332\n333\n334\n335\n336\n337\n338\n339\n340\n341\n342\n343\n344\n345\n346\n347\n348\n349\n350\n351\n352\n353\n354\n355\n356\n357\n358\n359\n360\n361\n362\n363\n364\n365\n366\n367\n368\n369\n370\n371\n372\n373\n374\n375\n376\n377\n378\n379\n380\n381\n382\n383\n384\n385\n386\n387\n388\n389\n390\n391\n392\n393\n394\n395\n396\n397\n398\n399\n400\n401\n402\n403\n404\n405\n406\n407\n408\n409\n410\n411\n412\n413\n414\n415\n416\n417\n418\n419\n420\n421\n422\n423\n424\n425\n426\n427\n428\n429\n430\n431\n432\n433\n434\n435\n436\n437\n438\n439\n440\n441\n442\n443\n444\n445\n446\n447\n448\n449\n450\n451\n452\n453\n454\n455\n456\n457\n458\n459\n460\n461\n462\n463\n464\n465\n466\n467\n468\n469\n470\n471\n472\n473\n474\n475\n476\n477\n478\n479\n480\n481\n482\n483\n484\n485\n486\n487\n488\n489\n490\n491\n492\n493\n494\n495\n496\n497\n498\n499\n500\n501\n502\n503\n504\n505\n506\n507\n508\n509\n510\n511\n512\n513\n514\n515\n516\n517\n518\n519\n520\n521\n522\n523\n524\n525\n526\n527\n528\n529\n530\n531\n532\n533\n534\n535\n536\n537\n538\n539\n540\n541\n542\n543\n544\n545\n546\n547\n548\n549\n550\n551\n552\n553\n554\n555\n556\n557\n558\n559\n560\n561\n562\n563\n564\n565\n566\n567\n568\n569\n570\n571\n572\n573\n574\n575\n576\n577\n578\n579\n580\n581\n582\n583\n584\n585\n586\n587\n588\n589\n590\n591\n592\n593\n594\n595\n596\n597\n598\n599\n600\n601\n602\n603\n604\n605\n606\n607\n608\n609\n610\n611\n612\n613\n614\n615\n616\n617\n618\n619\n620\n621\n622\n623\n624\n625\n626\n627\n628\n629\n630\n631\n632\n633\n634\n635\n636\n637\n638\n639\n640\n641\n642\n643\n644\n645\n646\n647\n648\n649\n650\n651\n652\n653\n654\n655\n656\n657\n658\n659\n660\n661\n662\n663\n664\n665\n666\n667\n668\n669\n670\n671\n672\n673\n674\n675\n676\n677\n678\n679\n680\n681\n682\n683\n684\n685\n686\n687\n688\n689\n690\n691\n692\n693\n694\n695\n696\n697\n698\n699\n700\n701\n702\n703\n704\n705\n706\n707\n708\n709\n710\n711\n712\n713\n714\n715\n716\n717\n718\n719\n720\n721\n722\n723\n724\n725\n726\n727\n728\n729\n730\n731\n732
\n733\n734\n735\n736\n737\n738\n739\n740\n741\n742\n743\n744\n745\n746\n747\n748\n749\n750\n751\n752\n753\n754\n755\n756\n757\n758\n759\n760\n761\n762\n763\n764\n765\n766\n767\n768\n769\n770\n771\n772\n773\n774\n775\n776\n777\n778\n779\n780\n781\n782\n783\n784\n785\n786\n787\n788\n789\n790\n791\n792\n793\n794\n795\n796\n797\n798\n799\n800\n801\n802\n803\n804\n805\n806\n807\n808\n809\n810\n811\n812\n813\n814\n815\n816\n817\n818\n819\n820\n821\n822\n823\n824\n825\n826\n827\n828\n829\n830\n831\n832\n833\n834\n835\n836\n837\n838\n839\n840\n841\n842\n843\n844\n845\n846\n847\n848\n849\n850\n851\n852\n853\n854\n855\n856\n857\n858\n859\n860\n861\n862\n863\n864\n865\n866\n867\n868\n869\n870\n871\n872\n873\n874\n875\n876\n877\n878\n879\n880\n881\n882\n883\n884\n885\n886\n887\n888\n889\n890\n891\n892\n893\n894\n895\n896\n897\n898\n899\n900\n901\n902\n903\n904\n905\n906\n907\n908\n909\n910\n911\n912\n913\n914\n915\n916\n917\n918\n919\n920\n921\n922\n923\n924\n925\n926\n927\n928\n929\n930\n931\n932\n933\n934\n935\n936\n937\n938\n939\n940\n941\n942\n943\n944\n945\n946\n947\n948\n949\n950\n951\n952\n953\n954\n955\n956\n957\n958\n959\n960\n961\n962\n963\n964\n965\n966\n967\n968\n969\n970\n971\n972\n973\n974\n975\n976\n977\n978\n979\n980\n981\n982\n983\n984\n985\n986\n987\n988\n989\n990\n991\n992\n993\n994\n995\n996\n997\n998\n999\n1000\n1001\n1002\n1003\n1004\n1005\n1006\n1007\n1008\n1009\n1010\n1011\n1012\n1013\n1014\n1015\n1016\n1017\n1018\n1019\n1020\n1021\n1022\n1023\n1024\n1025\n1026\n1027\n1028\n1029\n1030\n1031\n1032\n1033\n1034\n1035\n1036\n1037\n1038\n1039\n1040\n1041\n1042\n1043\n1044\n1045\n1046\n1047\n1048\n1049\n1050\n1051\n1052\n1053\n1054\n1055\n1056\n1057\n1058\n1059\n1060\n1061\n1062\n1063\n1064\n1065\n1066\n1067\n1068\n1069\n1070\n1071\n1072\n1073\n1074\n1075\n1076\n1077\n1078\n1079\n1080\n1081\n1082\n1083\n1084\n1085\n1086\n1087\n1088\n1089\n1090\n1091\n1092\n1093\n1094\n1095\n1096\n1097\n1098\n1099\n1100\n1101\n1102\n1103\n1104\n1105\n1106\n1107\n1108\n1109\n1110\n1111\n1112\n1113\n1114\n1115\n1116\n1117\n1118\n1119\n1120\n1121\n1122\n1123\n1124\n1125\n1126\n1127\n1128\n1129\n1130\n1131\n1132\n1133\n1134\n1135\n1136\n1137\n1138\n1139\n1140\n1141\n1142\n1143\n1144\n1145\n1146\n1147\n1148\n1149\n1150\n1151\n1152\n1153\n1154\n1155\n1156\n1157\n1158\n1159\n1160\n1161\n1162\n1163\n1164\n1165\n1166\n1167\n1168\n1169\n1170\n1171\n1172\n1173\n1174\n1175\n1176\n1177\n1178\n1179\n1180\n1181\n1182\n1183\n1184\n1185\n1186\n1187\n1188\n1189\n1190\n1191\n1192\n1193\n1194\n1195\n1196\n1197\n1198\n1199\n1200\n1201\n1202\n1203\n1204\n1205\n1206\n1207\n1208\n1209\n1210\n1211\n1212\n1213\n1214\n1215\n1216\n1217\n1218\n1219\n1220\n1221\n1222\n1223\n1224\n1225\n1226\n1227\n1228\n1229\n1230\n1231\n1232\n1233\n1234\n1235\n1236\n1237\n1238\n1239\n1240\n1241\n1242\n1243\n1244\n1245\n1246\n1247\n1248\n1249\n1250\n1251\n1252\n1253\n1254\n1255\n1256\n1257\n1258\n1259\n1260\n1261\n1262\n1263\n1264\n1265\n1266\n1267\n1268\n1269\n1270\n1271\n1272\n1273\n1274\n1275\n1276\n1277\n1278\n1279\n1280\n1281\n1282\n1283\n1284\n1285\n1286\n1287\n1288\n1289\n1290\n1291\n1292\n1293\n1294\n1295\n1296\n1297\n1298\n1299\n1300\n1301\n1302\n1303\n1304\n1305\n1306\n1307\n1308\n1309\n1310\n1311\n1312\n1313\n1314\n1315\n1316\n1317\n1318\n1319\n1320\n1321\n1322\n1323\n1324\n1325\n1326\n1327\n1328\n1329\n1330\n1331\n1332\n1333\n1334\n1335\n1336\n1337\n1338\n1339\n1340\n1341\n1342\n1343\n1344\n1345\n1346\n1347\n1348\n1349\n1350\n1351\n1352\n1353\n1354\n1355\n1356\n1357\n1358\n1359\n1360\n1361\n1362\n1363\n1364\n1365\n1366\n1367\n1368\n1369
\n1370\n1371\n1372\n1373\n1374\n1375\n1376\n1377\n1378\n1379\n1380\n1381\n1382\n1383\n1384\n1385\n1386\n1387\n1388\n1389\n1390\n1391\n1392\n1393\n1394\n1395\n1396\n1397\n1398\n1399\n1400\n1401\n1402\n1403\n1404\n1405\n1406\n1407\n1408\n1409\n1410\n1411\n1412\n1413\n1414\n1415\n1416\n1417\n1418\n1419\n1420\n1421\n1422\n1423\n1424\n1425\n1426\n1427\n1428\n1429\n1430\n1431\n1432\n1433\n1434\n1435\n1436\n1437\n1438\n1439\n1440\n1441\n1442\n1443\n1444\n1445\n1446\n1447\n1448\n1449\n1450\n1451\n1452\n1453\n1454\n1455\n1456\n1457\n1458\n1459\n1460\n1461\n1462\n1463\n1464\n1465\n1466\n1467\n1468\n1469\n1470\n1471\n1472\n1473\n1474\n1475\n1476\n1477\n1478\n1479\n1480\n1481\n1482\n1483\n1484\n1485\n1486\n1487\n1488\n1489\n1490\n1491\n1492\n1493\n1494\n1495\n1496\n1497\n1498\n1499\n1500\n1501\n1502\n1503\n1504\n1505\n1506\n1507\n1508\n1509\n1510\n1511\n1512\n1513\n1514\n1515\n1516\n1517\n1518\n1519\n1520\n1521\n1522\n1523\n1524\n1525\n1526\n1527\n1528\n1529\n1530\n1531\n1532\n1533\n1534\n1535\n1536\n1537\n1538\n1539\n1540\n1541\n1542\n1543\n1544\n1545\n1546\n1547\n1548\n1549\n1550\n1551\n1552\n1553\n1554\n1555\n1556\n1557\n1558\n1559\n1560\n1561\n1562\n1563\n1564\n1565\n1566\n1567\n1568\n1569\n1570\n1571\n1572\n1573\n1574\n1575\n1576\n1577\n1578\n1579\n1580\n1581\n1582\n1583\n1584\n1585\n1586\n1587\n1588\n1589\n1590\n1591\n1592\n1593\n1594\n1595\n1596\n1597\n1598\n1599\n1600\n1601\n1602\n1603\n1604\n1605\n1606\n1607\n1608\n1609\n1610\n1611\n1612\n1613\n1614\n1615\n1616\n1617\n1618\n1619\n1620\n1621\n1622\n1623\n1624\n1625\n1626\n1627\n1628\n1629\n1630\n1631\n1632\n1633\n1634\n1635\n1636\n1637\n1638\n1639\n1640\n1641\n1642\n1643\n1644\n1645\n1646\n1647\n1648\n1649\n1650\n1651\n1652\n1653\n1654\n1655\n1656\n1657\n1658\n1659\n1660\n1661\n1662\n1663\n1664\n1665\n1666\n1667\n1668\n1669\n1670\n1671\n1672\n1673\n1674\n1675\n1676\n1677\n1678\n1679\n1680\n1681\n1682\n1683\n1684\n1685\n1686\n1687\n1688\n1689\n1690\n1691\n1692\n1693\n1694\n1695\n1696\n1697\n1698\n1699\n1700\n1701\n1702\n1703\n1704\n1705\n1706\n1707\n1708\n1709\n1710\n1711\n1712\n1713\n1714\n1715\n1716\n1717\n1718\n1719\n1720\n1721\n1722\n1723\n1724\n1725\n1726\n1727\n1728\n1729\n1730\n1731\n1732\n1733\n1734\n1735\n1736\n1737\n1738\n1739\n1740\n1741\n1742\n1743\n1744\n1745\n1746\n1747\n1748\n1749\n1750\n1751\n1752\n1753\n1754\n1755\n1756\n1757\n1758\n1759\n1760\n1761\n1762\n1763\n1764\n1765\n1766\n1767\n1768\n1769\n1770\n1771\n1772\n1773\n1774\n1775\n1776\n1777\n1778\n1779\n1780\n1781\n1782\n1783\n1784\n1785\n1786\n1787\n1788\n1789\n1790\n1791\n1792\n1793\n1794\n1795\n1796\n1797\n1798\n1799\n1800\n1801\n1802\n1803\n1804\n1805\n1806\n1807\n1808\n1809\n1810\n1811\n1812\n1813\n1814\n1815\n1816\n1817\n1818\n1819\n1820\n1821\n1822\n1823\n1824\n1825\n1826\n1827\n1828\n1829\n1830\n1831\n1832\n1833\n1834\n1835\n1836\n1837\n1838\n1839\n1840\n1841\n1842\n1843\n1844\n1845\n1846\n1847\n1848\n1849\n1850\n1851\n1852\n1853\n1854\n1855\n1856\n1857\n1858\n1859\n1860\n1861\n1862\n1863\n1864\n1865\n1866\n1867\n1868\n1869\n1870\n1871\n1872\n1873\n1874\n1875\n1876\n1877\n1878\n1879\n1880\n1881\n1882\n1883\n1884\n1885\n1886\n1887\n1888\n1889\n1890\n1891\n1892\n1893\n1894\n1895\n1896\n1897\n1898\n1899\n1900\n1901\n1902\n1903\n1904\n1905\n1906\n1907\n1908\n1909\n1910\n1911\n1912\n1913\n1914\n1915\n1916\n1917\n1918\n1919\n1920\n1921\n1922\n1923\n1924\n1925\n1926\n1927\n1928\n1929\n1930\n1931\n1932\n1933\n1934\n1935\n1936\n1937\n1938\n1939\n1940\n1941\n1942\n1943\n1944\n1945\n1946\n1947\n1948\n1949\n1950\n1951\n1952\n1953\n1954\n1955\n1956\n1957\n1958\n1959\n1960\n1961\n1
962\n1963\n1964\n1965\n1966\n1967\n1968\n1969\n1970\n1971\n1972\n1973\n1974\n1975\n1976\n1977\n1978\n1979\n1980\n1981\n1982\n1983\n1984\n1985\n1986\n1987\n1988\n1989\n1990\n1991\n1992\n1993\n1994\n1995\n1996\n1997\n1998\n1999\n2000\n2001\n2002\n2003\n2004\n2005\n2006\n2007\n2008\n2009\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\n2020\n2021\n2022\n2023\n2024\n2025\n2026\n2027\n2028\n2029\n2030\n2031\n2032\n2033\n2034\n2035\n2036\n2037\n2038\n2039\n2040\n2041\n2042\n2043\n2044\n2045\n2046\n2047\n2048\n2049\n2050\n2051\n2052\n2053\n2054\n2055\n2056\n2057\n2058\n2059\n2060\n2061\n2062\n2063\n2064\n2065\n2066\n2067\n2068\n2069\n2070\n2071\n2072\n2073\n2074\n2075\n2076\n2077\n2078\n2079\n2080\n2081\n2082\n2083\n2084\n2085\n2086\n2087\n2088\n2089\n2090\n2091\n2092\n2093\n2094\n2095\n2096\n2097\n2098\n2099\n2100\n2101\n2102\n2103\n2104\n2105\n2106\n2107\n2108\n2109\n2110\n2111\n2112\n2113\n2114\n2115\n2116\n2117\n2118\n2119\n2120\n2121\n2122\n2123\n2124\n2125\n2126\n2127\n2128\n2129\n2130\n2131\n2132\n2133\n2134\n2135\n2136\n2137\n2138\n2139\n2140\n2141\n2142\n2143\n2144\n2145\n2146\n2147\n2148\n2149\n2150\n2151\n2152\n2153\n2154\n2155\n2156\n2157\n2158\n2159\n2160\n2161\n2162\n2163\n2164\n2165\n2166\n2167\n2168\n2169\n2170\n2171\n2172\n2173\n2174\n2175\n2176\n2177\n2178\n2179\n2180\n2181\n2182\n2183\n2184\n2185\n2186\n2187\n2188\n2189\n2190\n2191\n2192\n2193\n2194\n2195\n2196\n2197\n2198\n2199\n2200\n2201\n2202\n2203\n2204\n2205\n2206\n2207\n2208\n2209\n2210\n2211\n2212\n2213\n2214\n2215\n2216\n2217\n2218\n2219\n2220\n2221\n2222\n2223\n2224\n2225\n2226\n2227\n2228\n2229\n2230\n2231\n2232\n2233\n2234\n2235\n2236\n2237\n2238\n2239\n2240\n2241\n2242\n2243\n2244\n2245\n2246\n2247\n2248\n2249\n2250\n2251\n2252\n2253\n2254\n2255\n2256\n2257\n2258\n2259\n2260\n2261\n2262\n2263\n2264\n2265\n2266\n2267\n2268\n2269\n2270\n2271\n2272\n2273\n2274\n2275\n2276\n2277\n2278\n2279\n2280\n2281\n2282\n2283\n2284\n2285\n2286\n2287\n2288\n2289\n2290\n2291\n2292\n2293\n2294\n2295\n2296\n2297\n2298\n2299\n2300\n2301\n2302\n2303\n2304\n2305\n2306\n2307\n2308\n2309\n2310\n2311\n2312\n2313\n2314\n2315\n2316\n2317\n2318\n2319\n2320\n2321\n2322\n2323\n2324\n2325\n2326\n2327\n2328\n2329\n2330\n2331\n2332\n2333\n2334\n2335\n2336\n2337\n2338\n2339\n2340\n2341\n2342\n2343\n2344\n2345\n2346\n2347\n2348\n2349\n2350\n2351\n2352\n2353\n2354\n2355\n2356\n2357\n2358\n2359\n2360\n2361\n2362\n2363\n2364\n2365\n2366\n2367\n2368\n2369\n2370\n2371\n2372\n2373\n2374\n2375\n2376\n2377\n2378\n2379\n2380\n2381\n2382\n2383\n2384\n2385\n2386\n2387\n2388\n2389\n2390\n2391\n2392\n2393\n2394\n2395\n2396\n2397\n2398\n2399\n2400\n2401\n2402\n2403\n2404\n2405\n2406\n2407\n2408\n2409\n2410\n2411\n2412\n2413\n2414\n2415\n2416\n2417\n2418\n2419\n2420\n2421\n2422\n2423\n2424\n2425\n2426\n2427\n2428\n2429\n2430\n2431\n2432\n2433\n2434\n2435\n2436\n2437\n2438\n2439\n2440\n2441\n2442\n2443\n2444\n2445\n2446\n2447\n2448\n2449\n2450\n2451\n2452\n2453\n2454\n2455\n2456\n2457\n2458\n2459\n2460\n2461\n2462\n2463\n2464\n2465\n2466\n2467\n2468\n2469\n2470\n2471\n2472\n2473\n2474\n2475\n2476\n2477\n2478\n2479\n2480\n2481\n2482\n2483\n2484\n2485\n2486\n2487\n2488\n2489\n2490\n2491\n2492\n2493\n2494\n2495\n2496\n2497\n2498\n2499\n2500\n2501\n2502\n2503\n2504\n2505\n2506\n2507\n2508\n2509\n2510\n2511\n2512\n2513\n2514\n2515\n2516\n2517\n2518\n2519\n2520\n2521\n2522\n2523\n2524\n2525\n2526\n2527\n2528\n2529\n2530\n2531\n2532\n2533\n2534\n2535\n2536\n2537\n2538\n2539\n2540\n2541\n2542\n2543\n2544\n2545\n2546\n2547\n2548\n2549\n2550\n2551\n2552\n2553\n2554
\n2555\n2556\n2557\n2558\n2559\n2560\n2561\n2562\n2563\n2564\n2565\n2566\n2567\n2568\n2569\n2570\n2571\n2572\n2573\n2574\n2575\n2576\n2577\n2578\n2579\n2580\n2581\n2582\n2583\n2584\n2585\n2586\n2587\n2588\n2589\n2590\n2591\n2592\n2593\n2594\n2595\n2596\n2597\n2598\n2599\n2600\n2601\n2602\n2603\n2604\n2605\n2606\n2607\n2608\n2609\n2610\n2611\n2612\n2613\n2614\n2615\n2616\n2617\n2618\n2619\n2620\n2621\n2622\n2623\n2624\n2625\n2626\n2627\n2628\n2629\n2630\n2631\n2632\n2633\n2634\n2635\n2636\n2637\n2638\n2639\n2640\n2641\n2642\n2643\n2644\n2645\n2646\n2647\n2648\n2649\n2650\n2651\n2652\n2653\n2654\n2655\n2656\n2657\n2658\n2659\n2660\n2661\n2662\n2663\n2664\n2665\n2666\n2667\n2668\n2669\n2670\n2671\n2672\n2673\n2674\n2675\n2676\n2677\n2678\n2679\n2680\n2681\n2682\n2683\n2684\n2685\n2686\n2687\n2688\n2689\n2690\n2691\n2692\n2693\n2694\n2695\n2696\n2697\n2698\n2699\n2700\n2701\n2702\n2703\n2704\n2705\n2706\n2707\n2708\n2709\n2710\n2711\n2712\n2713\n2714\n2715\n2716\n2717\n2718\n2719\n2720\n2721\n2722\n2723\n2724\n2725\n2726\n2727\n2728\n2729\n2730\n2731\n2732\n2733\n2734\n2735\n2736\n2737\n2738\n2739\n2740\n2741\n2742\n2743\n2744\n2745\n2746\n2747\n2748\n2749\n2750\n2751\n2752\n2753\n2754\n2755\n2756\n2757\n2758\n2759\n2760\n2761\n2762\n2763\n2764\n2765\n2766\n2767\n2768\n2769\n2770\n2771\n2772\n2773\n2774\n2775\n2776\n2777\n2778\n2779\n2780\n2781\n2782\n2783\n2784\n2785\n2786\n2787\n2788\n2789\n2790\n2791\n2792\n2793\n2794\n2795\n2796\n2797\n2798\n2799\n2800\n2801\n2802\n2803\n2804\n2805\n2806\n2807\n2808\n2809\n2810\n2811\n2812\n2813\n2814\n2815\n2816\n2817\n2818\n2819\n2820\n2821\n2822\n2823\n2824\n2825\n2826\n2827\n2828\n2829\n2830\n2831\n2832\n2833\n2834\n2835\n2836\n2837\n2838\n2839\n2840\n2841\n2842\n2843\n2844\n2845\n2846\n2847\n2848\n2849\n2850\n2851\n2852\n2853\n2854\n2855\n2856\n2857\n2858\n2859\n2860\n2861\n2862\n2863\n2864\n2865\n2866\n2867\n2868\n2869\n2870\n2871\n2872\n2873\n2874\n2875\n2876\n2877\n2878\n2879\n2880\n2881\n2882\n2883\n2884\n2885\n2886\n2887\n2888\n2889\n2890\n2891\n2892\n2893\n2894\n2895\n2896\n2897\n2898\n2899\n2900\n2901\n2902\n2903\n2904\n2905\n2906\n2907\n2908\n2909\n2910\n2911\n2912\n2913\n2914\n2915\n2916\n2917\n2918\n2919\n2920\n2921\n2922\n2923\n2924\n2925\n2926\n2927\n2928\n2929\n2930\n2931\n2932\n2933\n2934\n2935\n2936\n2937\n2938\n2939\n2940\n2941\n2942\n2943\n2944\n2945\n2946\n2947\n2948\n2949\n2950\n2951\n2952\n2953\n2954\n2955\n2956\n2957\n2958\n2959\n2960\n2961\n2962\n2963\n2964\n2965\n2966\n2967\n2968\n2969\n2970\n2971\n2972\n2973\n2974\n2975\n2976\n2977\n2978\n2979\n2980\n2981\n2982\n2983\n2984\n2985\n2986\n2987\n2988\n2989\n2990\n2991\n2992\n2993\n2994\n2995\n2996\n2997\n2998\n2999\n3000\n3001\n3002\n3003\n3004\n3005\n3006\n3007\n3008\n3009\n3010\n3011\n3012\n3013\n3014\n3015\n3016\n3017\n3018\n3019\n3020\n3021\n3022\n3023\n3024\n3025\n3026\n3027\n3028\n3029\n3030\n3031\n3032\n3033\n3034\n3035\n3036\n3037\n3038\n3039\n3040\n3041\n3042\n3043\n3044\n3045\n3046\n3047\n3048\n3049\n3050\n3051\n3052\n3053\n3054\n3055\n3056\n3057\n3058\n3059\n3060\n3061\n3062\n3063\n3064\n3065\n3066\n3067\n3068\n3069\n3070\n3071\n3072\n3073\n3074\n3075\n3076\n3077\n3078\n3079\n3080\n3081\n3082\n3083\n3084\n3085\n3086\n3087\n3088\n3089\n3090\n3091\n3092\n3093\n3094\n3095\n3096\n3097\n3098\n3099\n3100\n3101\n3102\n3103\n3104\n3105\n3106\n3107\n3108\n3109\n3110\n3111\n3112\n3113\n3114\n3115\n3116\n3117\n3118\n3119\n3120\n3121\n3122\n3123\n3124\n3125\n3126\n3127\n3128\n3129\n3130\n3131\n3132\n3133\n3134\n3135\n3136\n3137\n3138\n3139\n3140\n3141\n3142\n3143\n3144\n3145\n3146\n3
147\n3148\n3149\n3150\n3151\n3152\n3153\n3154\n3155\n3156\n3157\n3158\n3159\n3160\n3161\n3162\n3163\n3164\n3165\n3166\n3167\n3168\n3169\n3170\n3171\n3172\n3173\n3174\n3175\n3176\n3177\n3178\n3179\n3180\n3181\n3182\n3183\n3184\n3185\n3186\n3187\n3188\n3189\n3190\n3191\n3192\n3193\n3194\n3195\n3196\n3197\n3198\n3199\n3200\n3201\n3202\n3203\n3204\n3205\n3206\n3207\n3208\n3209\n3210\n3211\n3212\n3213\n3214\n3215\n3216\n3217\n3218\n3219\n3220\n3221\n3222\n3223\n3224\n3225\n3226\n3227\n3228\n3229\n3230\n3231\n3232\n3233\n3234\n3235\n3236\n3237\n3238\n3239\n3240\n3241\n3242\n3243\n3244\n3245\n3246\n3247\n3248\n3249\n3250\n3251\n3252\n3253\n3254\n3255\n3256\n3257\n3258\n3259\n3260\n3261\n3262\n3263\n3264\n3265\n3266\n3267\n3268\n3269\n3270\n3271\n3272\n3273\n3274\n3275\n3276\n3277\n3278\n3279\n3280\n3281\n3282\n3283\n3284\n3285\n3286\n3287\n3288\n3289\n3290\n3291\n3292\n3293\n3294\n3295\n3296\n3297\n3298\n3299\n3300\n3301\n3302\n3303\n3304\n3305\n3306\n3307\n3308\n3309\n3310\n3311\n3312\n3313\n3314\n3315\n3316\n3317\n3318\n3319\n3320\n3321\n3322\n3323\n3324\n3325\n3326\n3327\n3328\n3329\n3330\n3331\n3332\n3333\n3334\n3335\n3336\n3337\n3338\n3339\n3340\n3341\n3342\n3343\n3344\n3345\n3346\n3347\n3348\n3349\n3350\n3351\n3352\n3353\n3354\n3355\n3356\n3357\n3358\n3359\n3360\n3361\n3362\n3363\n3364\n3365\n3366\n3367\n3368\n3369\n3370\n3371\n3372\n3373\n3374\n3375\n3376\n3377\n3378\n3379\n3380\n3381\n3382\n3383\n3384\n3385\n3386\n3387\n3388\n3389\n3390\n3391\n3392\n3393\n3394\n3395\n3396\n3397\n3398\n3399\n3400\n3401\n3402\n3403\n3404\n3405\n3406\n3407\n3408\n3409\n3410\n3411\n3412\n3413\n3414\n3415\n3416\n3417\n3418\n3419\n3420\n3421\n3422\n3423\n3424\n3425\n3426\n3427\n3428\n3429\n3430\n3431\n3432\n3433\n3434\n3435\n3436\n3437\n3438\n3439\n3440\n3441\n3442\n3443\n3444\n3445\n3446\n3447\n3448\n3449\n3450\n3451\n3452\n3453\n3454\n3455\n3456\n3457\n3458\n3459\n3460\n3461\n3462\n3463\n3464\n3465\n3466\n3467\n3468\n3469\n3470\n3471\n3472\n3473\n3474\n3475\n3476\n3477\n3478\n3479\n3480\n3481\n3482\n3483\n3484\n3485\n3486\n3487\n3488\n3489\n3490\n3491\n3492\n3493\n3494\n3495\n3496\n3497\n3498\n3499\n3500\n3501\n3502\n3503\n3504\n3505\n3506\n3507\n3508\n3509\n3510\n3511\n3512\n3513\n3514\n3515\n3516\n3517\n3518\n3519\n3520\n3521\n3522\n3523\n3524\n3525\n3526\n3527\n3528\n3529\n3530\n3531\n3532\n3533\n3534\n3535\n3536\n3537\n3538\n3539\n3540\n3541\n3542\n3543\n3544\n3545\n3546\n3547\n3548\n3549\n3550\n3551\n3552\n3553\n3554\n3555\n3556\n3557\n3558\n3559\n3560\n3561\n3562\n3563\n3564\n3565\n3566\n3567\n3568\n3569\n3570\n3571\n3572\n3573\n3574\n3575\n3576\n3577\n3578\n3579\n3580\n3581\n3582\n3583\n3584\n3585\n3586\n3587\n3588\n3589\n3590\n3591\n3592\n3593\n3594\n3595\n3596\n3597\n3598\n3599\n3600\n3601\n3602\n3603\n3604\n3605\n3606\n3607\n3608\n3609\n3610\n3611\n3612\n3613\n3614\n3615\n3616\n3617\n3618\n3619\n3620\n3621\n3622\n3623\n3624\n3625\n3626\n3627\n3628\n3629\n3630\n3631\n3632\n3633\n3634\n3635\n3636\n3637\n3638\n3639\n3640\n3641\n3642\n3643\n3644\n3645\n3646\n3647\n3648\n3649\n3650\n3651\n3652\n3653\n3654\n3655\n3656\n3657\n3658\n3659\n3660\n3661\n3662\n3663\n3664\n3665\n3666\n3667\n3668\n3669\n3670\n3671\n3672\n3673\n3674\n3675\n3676\n3677\n3678\n3679\n3680\n3681\n3682\n3683\n3684\n3685\n3686\n3687\n3688\n3689\n3690\n3691\n3692\n3693\n3694\n3695\n3696\n3697\n3698\n3699\n3700\n3701\n3702\n3703\n3704\n3705\n3706\n3707\n3708\n3709\n3710\n3711\n3712\n3713\n3714\n3715\n3716\n3717\n3718\n3719\n3720\n3721\n3722\n3723\n3724\n3725\n3726\n3727\n3728\n3729\n3730\n3731\n3732\n3733\n3734\n3735\n3736\n3737\n3738\n3739
\n3740\n3741\n3742\n3743\n3744\n3745\n3746\n3747\n3748\n3749\n3750\n3751\n3752\n3753\n3754\n3755\n3756\n3757\n3758\n3759\n3760\n3761\n3762\n3763\n3764\n3765\n3766\n3767\n3768\n3769\n3770\n3771\n3772\n3773\n3774\n3775\n3776\n3777\n3778\n3779\n3780\n3781\n3782\n3783\n3784\n3785\n3786\n3787\n3788\n3789\n3790\n3791\n3792\n3793\n3794\n3795\n3796\n3797\n3798\n3799\n3800\n3801\n3802\n3803\n3804\n3805\n3806\n3807\n3808\n3809\n3810\n3811\n3812\n3813\n3814\n3815\n3816\n3817\n3818\n3819\n3820\n3821\n3822\n3823\n3824\n3825\n3826\n3827\n3828\n3829\n3830\n3831\n3832\n3833\n3834\n3835\n3836\n3837\n3838\n3839\n3840\n3841\n3842\n3843\n3844\n3845\n3846\n3847\n3848\n3849\n3850\n3851\n3852\n3853\n3854\n3855\n3856\n3857\n3858\n3859\n3860\n3861\n3862\n3863\n3864\n3865\n3866\n3867\n3868\n3869\n3870\n3871\n3872\n3873\n3874\n3875\n3876\n3877\n3878\n3879\n3880\n3881\n3882\n3883\n3884\n3885\n3886\n3887\n3888\n3889\n3890\n3891\n3892\n3893\n3894\n3895\n3896\n3897\n3898\n3899\n3900\n3901\n3902\n3903\n3904\n3905\n3906\n3907\n3908\n3909\n3910\n3911\n3912\n3913\n3914\n3915\n3916\n3917\n3918\n3919\n3920\n3921\n3922\n3923\n3924\n3925\n3926\n3927\n3928\n3929\n3930\n3931\n3932\n3933\n3934\n3935\n3936\n3937\n3938\n3939\n3940\n3941\n3942\n3943\n3944\n3945\n3946\n3947\n3948\n3949\n3950\n3951\n3952\n3953\n3954\n3955\n3956\n3957\n3958\n3959\n3960\n3961\n3962\n3963\n3964\n3965\n3966\n3967\n3968\n3969\n3970\n3971\n3972\n3973\n3974\n3975\n3976\n3977\n3978\n3979\n3980\n3981\n3982\n3983\n3984\n3985\n3986\n3987\n3988\n3989\n3990\n3991\n3992\n3993\n3994\n3995\n3996\n3997\n3998\n3999\n4000\n4001\n4002\n4003\n4004\n4005\n4006\n4007\n4008\n4009\n4010\n4011\n4012\n4013\n4014\n4015\n4016\n4017\n4018\n4019\n4020\n4021\n4022\n4023\n4024\n4025\n4026\n4027\n4028\n4029\n4030\n4031\n4032\n4033\n4034\n4035\n4036\n4037\n4038\n4039\n4040\n4041\n4042\n4043\n4044\n4045\n4046\n4047\n4048\n4049\n4050\n4051\n4052\n4053\n4054\n4055\n4056\n4057\n4058\n4059\n4060\n4061\n4062\n4063\n4064\n4065\n4066\n4067\n4068\n4069\n4070\n4071\n4072\n4073\n4074\n4075\n4076\n4077\n4078\n4079\n4080\n4081\n4082\n4083\n4084\n4085\n4086\n4087\n4088\n4089\n4090\n4091\n4092\n4093\n4094\n4095\n4096\n4097\n4098\n4099\n4100\n4101\n4102\n4103\n4104\n4105\n4106\n4107\n4108\n4109\n4110\n4111\n4112\n4113\n4114\n4115\n4116\n4117\n4118\n4119\n4120\n4121\n4122\n4123\n4124\n4125\n4126\n4127\n4128\n4129\n4130\n4131\n4132\n4133\n4134\n4135\n4136\n4137\n4138\n4139\n4140\n4141\n4142\n4143\n4144\n4145\n4146\n4147\n4148\n4149\n4150\n4151\n4152\n4153\n4154\n4155\n4156\n4157\n4158\n4159\n4160\n4161\n4162\n4163\n4164\n4165\n4166\n4167\n4168\n4169\n4170\n4171\n4172\n4173\n4174\n4175\n4176\n4177\n4178\n4179\n4180\n4181\n4182\n4183\n4184\n4185\n4186\n4187\n4188\n4189\n4190\n4191\n4192\n4193\n4194\n4195\n4196\n4197\n4198\n4199\n4200\n4201\n4202\n4203\n4204\n4205\n4206\n4207\n4208\n4209\n4210\n4211\n4212\n4213\n4214\n4215\n4216\n4217\n4218\n4219\n4220\n4221\n4222\n4223\n4224\n4225\n4226\n4227\n4228\n4229\n4230\n4231\n4232\n4233\n4234\n4235\n4236\n4237\n4238\n4239\n4240\n4241\n4242\n4243\n4244\n4245\n4246\n4247\n4248\n4249\n4250\n4251\n4252\n4253\n4254\n4255\n4256\n4257\n4258\n4259\n4260\n4261\n4262\n4263\n4264\n4265\n4266\n4267\n4268\n4269\n4270\n4271\n4272\n4273\n4274\n4275\n4276\n4277\n4278\n4279\n4280\n4281\n4282\n4283\n4284\n4285\n4286\n4287\n4288\n4289\n4290\n4291\n4292\n4293\n4294\n4295\n4296\n4297\n4298\n4299\n4300\n4301\n4302\n4303\n4304\n4305\n4306\n4307\n4308\n4309\n4310\n4311\n4312\n4313\n4314\n4315\n4316\n4317\n4318\n4319\n4320\n4321\n4322\n4323\n4324\n4325\n4326\n4327\n4328\n4329\n4330\n4331\n4
332\n4333\n4334\n4335\n4336\n4337\n4338\n4339\n4340\n4341\n4342\n4343\n4344\n4345\n4346\n4347\n4348\n4349\n4350\n4351\n4352\n4353\n4354\n4355\n4356\n4357\n4358\n4359\n4360\n4361\n4362\n4363\n4364\n4365\n4366\n4367\n4368\n4369\n4370\n4371\n4372\n4373\n4374\n4375\n4376\n4377\n4378\n4379\n4380\n4381\n4382\n4383\n4384\n4385\n4386\n4387\n4388\n4389\n4390\n4391\n4392\n4393\n4394\n4395\n4396\n4397\n4398\n4399\n4400\n4401\n4402\n4403\n4404\n4405\n4406\n4407\n4408\n4409\n4410\n4411\n4412\n4413\n4414\n4415\n4416\n4417\n4418\n4419\n4420\n4421\n4422\n4423\n4424\n4425\n4426\n4427\n4428\n4429\n4430\n4431\n4432\n4433\n4434\n4435\n4436\n4437\n4438\n4439\n4440\n4441\n4442\n4443\n4444\n4445\n4446\n4447\n4448\n4449\n4450\n4451\n4452\n4453\n4454\n4455\n4456\n4457\n4458\n4459\n4460\n4461\n4462\n4463\n4464\n4465\n4466\n4467\n4468\n4469\n4470\n4471\n4472\n4473\n4474\n4475\n4476\n4477\n4478\n4479\n4480\n4481\n4482\n4483\n4484\n4485\n4486\n4487\n4488\n4489\n4490\n4491\n4492\n4493\n4494\n4495\n4496\n4497\n4498\n4499\n4500\n4501\n4502\n4503\n4504\n4505\n4506\n4507\n4508\n4509\n4510\n4511\n4512\n4513\n4514\n4515\n4516\n4517\n4518\n4519\n4520\n4521\n4522\n4523\n4524\n4525\n4526\n4527\n4528\n4529\n4530\n4531\n4532\n4533\n4534\n4535\n4536\n4537\n4538\n4539\n4540\n4541\n4542\n4543\n4544\n4545\n4546\n4547\n4548\n4549\n4550\n4551\n4552\n4553\n4554\n4555\n4556\n4557\n4558\n4559\n4560\n4561\n4562\n4563\n4564\n4565\n4566\n4567\n4568\n4569\n4570\n4571\n4572\n4573\n4574\n4575\n4576\n4577\n4578\n4579\n4580\n4581\n4582\n4583\n4584\n4585\n4586\n4587\n4588\n4589\n4590\n4591\n4592\n4593\n4594\n4595\n4596\n4597\n4598\n4599\n4600\n4601\n4602\n4603\n4604\n4605\n4606\n4607\n4608\n4609\n4610\n4611\n4612\n4613\n4614\n4615\n4616\n4617\n4618\n4619\n4620\n4621\n4622\n4623\n4624\n4625\n4626\n4627\n4628\n4629\n4630\n4631\n4632\n4633\n4634\n4635\n4636\n4637\n4638\n4639\n4640\n4641\n4642\n4643\n4644\n4645\n4646\n4647\n4648\n4649\n4650\n4651\n4652\n4653\n4654\n4655\n4656\n4657\n4658\n4659\n4660\n4661\n4662\n4663\n4664\n4665\n4666\n4667\n4668\n4669\n4670\n4671\n4672\n4673\n4674\n4675\n4676\n4677\n4678\n4679\n4680\n4681\n4682\n4683\n4684\n4685\n4686\n4687\n4688\n4689\n4690\n4691\n4692\n4693\n4694\n4695\n4696\n4697\n4698\n4699\n4700\n4701\n4702\n4703\n4704\n4705\n4706\n4707\n4708\n4709\n4710\n4711\n4712\n4713\n4714\n4715\n4716\n4717\n4718\n4719\n4720\n4721\n4722\n4723\n4724\n4725\n4726\n4727\n4728\n4729\n4730\n4731\n4732\n4733\n4734\n4735\n4736\n4737\n4738\n4739\n4740\n4741\n4742\n4743\n4744\n4745\n4746\n4747\n4748\n4749\n4750\n4751\n4752\n4753\n4754\n4755\n4756\n4757\n4758\n4759\n4760\n4761\n4762\n4763\n4764\n4765\n4766\n4767\n4768\n4769\n4770\n4771\n4772\n4773\n4774\n4775\n4776\n4777\n4778\n4779\n4780\n4781\n4782\n4783\n4784\n4785\n4786\n4787\n4788\n4789\n4790\n4791\n4792\n4793\n4794\n4795\n4796\n4797\n4798\n4799\n4800\n4801\n4802\n4803\n4804\n4805\n4806\n4807\n4808\n4809\n4810\n4811\n4812\n4813\n4814\n4815\n4816\n4817\n4818\n4819\n4820\n4821\n4822\n4823\n4824\n4825\n4826\n4827\n4828\n4829\n4830\n4831\n4832\n4833\n4834\n4835\n4836\n4837\n4838\n4839\n4840\n4841\n4842\n4843\n4844\n4845\n4846\n4847\n4848\n4849\n4850\n4851\n4852\n4853\n4854\n4855\n4856\n4857\n4858\n4859\n4860\n4861\n4862\n4863\n4864\n4865\n4866\n4867\n4868\n4869\n4870\n4871\n4872\n4873\n4874\n4875\n4876\n4877\n4878\n4879\n4880\n4881\n4882\n4883\n4884\n4885\n4886\n4887\n4888\n4889\n4890\n4891\n4892\n4893\n4894\n4895\n4896\n4897\n4898\n4899\n4900\n4901\n4902\n4903\n4904\n4905\n4906\n4907\n4908\n4909\n4910\n4911\n4912\n4913\n4914\n4915\n4916\n4917\n4918\n4919\n4920\n4921\n4922\n4923\n4924
\n4925\n4926\n4927\n4928\n4929\n4930\n4931\n4932\n4933\n4934\n4935\n4936\n4937\n4938\n4939\n4940\n4941\n4942\n4943\n4944\n4945\n4946\n4947\n4948\n4949\n4950\n4951\n4952\n4953\n4954\n4955\n4956\n4957\n4958\n4959\n4960\n4961\n4962\n4963\n4964\n4965\n4966\n4967\n4968\n4969\n4970\n4971\n4972\n4973\n4974\n4975\n4976\n4977\n4978\n4979\n4980\n4981\n4982\n4983\n4984\n4985\n4986\n4987\n4988\n4989\n4990\n4991\n4992\n4993\n4994\n4995\n4996\n4997\n4998\n4999\n5000\n5001\n5002\n5003\n5004\n5005\n5006\n5007\n5008\n5009\n5010\n5011\n5012\n5013\n5014\n5015\n5016\n5017\n5018\n5019\n5020\n5021\n5022\n5023\n5024\n5025\n5026\n5027\n5028\n5029\n5030\n5031\n5032\n5033\n5034\n5035\n5036\n5037\n5038\n5039\n5040\n5041\n5042\n5043\n5044\n5045\n5046\n5047\n5048\n5049\n5050\n5051\n5052\n5053\n5054\n5055\n5056\n5057\n5058\n5059\n5060\n5061\n5062\n5063\n5064\n5065\n5066\n5067\n5068\n5069\n5070\n5071\n5072\n5073\n5074\n5075\n5076\n5077\n5078\n5079\n5080\n5081\n5082\n5083\n5084\n5085\n5086\n5087\n5088\n5089\n5090\n5091\n5092\n5093\n5094\n5095\n5096\n5097\n5098\n5099\n5100\n5101\n5102\n5103\n5104\n5105\n5106\n5107\n5108\n5109\n5110\n5111\n5112\n5113\n5114\n5115\n5116\n5117\n5118\n5119\n5120\n5121\n5122\n5123\n5124\n5125\n5126\n5127\n5128\n5129\n5130\n5131\n5132\n5133\n5134\n5135\n5136\n5137\n5138\n5139\n5140\n5141\n5142\n5143\n5144\n5145\n5146\n5147\n5148\n5149\n5150\n5151\n5152\n5153\n5154\n5155\n5156\n5157\n5158\n5159\n5160\n5161\n5162\n5163\n5164\n5165\n5166\n5167\n5168\n5169\n5170\n5171\n5172\n5173\n5174\n5175\n5176\n5177\n5178\n5179\n5180\n5181\n5182\n5183\n5184\n5185\n5186\n5187\n5188\n5189\n5190\n5191\n5192\n5193\n5194\n5195\n5196\n5197\n5198\n5199\n5200\n5201\n5202\n5203\n5204\n5205\n5206\n5207\n5208\n5209\n5210\n5211\n5212\n5213\n5214\n5215\n5216\n5217\n5218\n5219\n5220\n5221\n5222\n5223\n5224\n5225\n5226\n5227\n5228\n5229\n5230\n5231\n5232\n5233\n5234\n5235\n5236\n5237\n5238\n5239\n5240\n5241\n5242\n5243\n5244\n5245\n5246\n5247\n5248\n5249\n5250\n5251\n5252\n5253\n5254\n5255\n5256\n5257\n5258\n5259\n5260\n5261\n5262\n5263\n5264\n5265\n5266\n5267\n5268\n5269\n5270\n5271\n5272\n5273\n5274\n5275\n5276\n5277\n5278\n5279\n5280\n5281\n5282\n5283\n5284\n5285\n5286\n5287\n5288\n5289\n5290\n5291\n5292\n5293\n5294\n5295\n5296\n5297\n5298\n5299\n5300\n5301\n5302\n5303\n5304\n5305\n5306\n5307\n5308\n5309\n5310\n5311\n5312\n5313\n5314\n5315\n5316\n5317\n5318\n5319\n5320\n5321\n5322\n5323\n5324\n5325\n5326\n5327\n5328\n5329\n5330\n5331\n5332\n5333\n5334\n5335\n5336\n5337\n5338\n5339\n5340\n5341\n5342\n5343\n5344\n5345\n5346\n5347\n5348\n5349\n5350\n5351\n5352\n5353\n5354\n5355\n5356\n5357\n5358\n5359\n5360\n5361\n5362\n5363\n5364\n5365\n5366\n5367\n5368\n5369\n5370\n5371\n5372\n5373\n5374\n5375\n5376\n5377\n5378\n5379\n5380\n5381\n5382\n5383\n5384\n5385\n5386\n5387\n5388\n5389\n5390\n5391\n5392\n5393\n5394\n5395\n5396\n5397\n5398\n5399\n5400\n5401\n5402\n5403\n5404\n5405\n5406\n5407\n5408\n5409\n5410\n5411\n5412\n5413\n5414\n5415\n5416\n5417\n5418\n5419\n5420\n5421\n5422\n5423\n5424\n5425\n5426\n5427\n5428\n5429\n5430\n5431\n5432\n5433\n5434\n5435\n5436\n5437\n5438\n5439\n5440\n5441\n5442\n5443\n5444\n5445\n5446\n5447\n5448\n5449\n5450\n5451\n5452\n5453\n5454\n5455\n5456\n5457\n5458\n5459\n5460\n5461\n5462\n5463\n5464\n5465\n5466\n5467\n5468\n5469\n5470\n5471\n5472\n5473\n5474\n5475\n5476\n5477\n5478\n5479\n5480\n5481\n5482\n5483\n5484\n5485\n5486\n5487\n5488\n5489\n5490\n5491\n5492\n5493\n5494\n5495\n5496\n5497\n5498\n5499\n5500\n5501\n5502\n5503\n5504\n5505\n5506\n5507\n5508\n5509\n5510\n5511\n5512\n5513\n5514\n5515\n5516\n5
517\n5518\n5519\n5520\n5521\n5522\n5523\n5524\n5525\n5526\n5527\n5528\n5529\n5530\n5531\n5532\n5533\n5534\n5535\n5536\n5537\n5538\n5539\n5540\n5541\n5542\n5543\n5544\n5545\n5546\n5547\n5548\n5549\n5550\n5551\n5552\n5553\n5554\n5555\n5556\n5557\n5558\n5559\n5560\n5561\n5562\n5563\n5564\n5565\n5566\n5567\n5568\n5569\n5570\n5571\n5572\n5573\n5574\n5575\n5576\n5577\n5578\n5579\n5580\n5581\n5582\n5583\n5584\n5585\n5586\n5587\n5588\n5589\n5590\n5591\n5592\n5593\n5594\n5595\n5596\n5597\n5598\n5599\n5600\n5601\n5602\n5603\n5604\n5605\n5606\n5607\n5608\n5609\n5610\n5611\n5612\n5613\n5614\n5615\n5616\n5617\n5618\n5619\n5620\n5621\n5622\n5623\n5624\n5625\n5626\n5627\n5628\n5629\n5630\n5631\n5632\n5633\n5634\n5635\n5636\n5637\n5638\n5639\n5640\n5641\n5642\n5643\n5644\n5645\n5646\n5647\n5648\n5649\n5650\n5651\n5652\n5653\n5654\n5655\n5656\n5657\n5658\n5659\n5660\n5661\n5662\n5663\n5664\n5665\n5666\n5667\n5668\n5669\n5670\n5671\n5672\n5673\n5674\n5675\n5676\n5677\n5678\n5679\n5680\n5681\n5682\n5683\n5684\n5685\n5686\n5687\n5688\n5689\n5690\n5691\n5692\n5693\n5694\n5695\n5696\n5697\n5698\n5699\n5700\n5701\n5702\n5703\n5704\n5705\n5706\n5707\n5708\n5709\n5710\n5711\n5712\n5713\n5714\n5715\n5716\n5717\n5718\n5719\n5720\n5721\n5722\n5723\n5724\n5725\n5726\n5727\n5728\n5729\n5730\n5731\n5732\n5733\n5734\n5735\n5736\n5737\n5738\n5739\n5740\n5741\n5742\n5743\n5744\n5745\n5746\n5747\n5748\n5749\n5750\n5751\n5752\n5753\n5754\n5755\n5756\n5757\n5758\n5759\n5760\n5761\n5762\n5763\n5764\n5765\n5766\n5767\n5768\n5769\n5770\n5771\n5772\n5773\n5774\n5775\n5776\n5777\n5778\n5779\n5780\n5781\n5782\n5783\n5784\n5785\n5786\n5787\n5788\n5789\n5790\n5791\n5792\n5793\n5794\n5795\n5796\n5797\n5798\n5799\n5800\n5801\n5802\n5803\n5804\n5805\n5806\n5807\n5808\n5809\n5810\n5811\n5812\n5813\n5814\n5815\n5816\n5817\n5818\n5819\n5820\n5821\n5822\n5823\n5824\n5825\n5826\n5827\n5828\n5829\n5830\n5831\n5832\n5833\n5834\n5835\n5836\n5837\n5838\n5839\n5840\n5841\n5842\n5843\n5844\n5845\n5846\n5847\n5848\n5849\n5850\n5851\n5852\n5853\n5854\n5855\n5856\n5857\n5858\n5859\n5860\n5861\n5862\n5863\n5864\n5865\n5866\n5867\n5868\n5869\n5870\n5871\n5872\n5873\n5874\n5875\n5876\n5877\n5878\n5879\n5880\n5881\n5882\n5883\n5884\n5885\n5886\n5887\n5888\n5889\n5890\n5891\n5892\n5893\n5894\n5895\n5896\n5897\n5898\n5899\n5900\n5901\n5902\n5903\n5904\n5905\n5906\n5907\n5908\n5909\n5910\n5911\n5912\n5913\n5914\n5915\n5916\n5917\n5918\n5919\n5920\n5921\n5922\n5923\n5924\n5925\n5926\n5927\n5928\n5929\n5930\n5931\n5932\n5933\n5934\n5935\n5936\n5937\n5938\n5939\n5940\n5941\n5942\n5943\n5944\n5945\n5946\n5947\n5948\n5949\n5950\n5951\n5952\n5953\n5954\n5955\n5956\n5957\n5958\n5959\n5960\n5961\n5962\n5963\n5964\n5965\n5966\n5967\n5968\n5969\n5970\n5971\n5972\n5973\n5974\n5975\n5976\n5977\n5978\n5979\n5980\n5981\n5982\n5983\n5984\n5985\n5986\n5987\n5988\n5989\n5990\n5991\n5992\n5993\n5994\n5995\n5996\n5997\n5998\n5999\n6000\n6001\n6002\n6003\n6004\n6005\n6006\n6007\n6008\n6009\n6010\n6011\n6012\n6013\n6014\n6015\n6016\n6017\n6018\n6019\n6020\n6021\n6022\n6023\n6024\n6025\n6026\n6027\n6028\n6029\n6030\n6031\n6032\n6033\n6034\n6035\n6036\n6037\n6038\n6039\n6040\n6041\n6042\n6043\n6044\n6045\n6046\n6047\n6048\n6049\n6050\n6051\n6052\n6053\n6054\n6055\n6056\n6057\n6058\n6059\n6060\n6061\n6062\n6063\n6064\n6065\n6066\n6067\n6068\n6069\n6070\n6071\n6072\n6073\n6074\n6075\n6076\n6077\n6078\n6079\n6080\n6081\n6082\n6083\n6084\n6085\n6086\n6087\n6088\n6089\n6090\n6091\n6092\n6093\n6094\n6095\n6096\n6097\n6098\n6099\n6100\n6101\n6102\n6103\n6104\n6105\n6106\n6107\n6108\n6109
n15807\n15808\n15809\n15810\n15811\n15812\n15813\n15814\n15815\n15816\n15817\n15818\n15819\n15820\n15821\n15822\n15823\n15824\n15825\n15826\n15827\n15828\n15829\n15830\n15831\n15832\n15833\n15834\n15835\n15836\n15837\n15838\n15839\n15840\n15841\n15842\n15843\n15844\n15845\n15846\n15847\n15848\n15849\n15850\n15851\n15852\n15853\n15854\n15855\n15856\n15857\n15858\n15859\n15860\n15861\n15862\n15863\n15864\n15865\n15866\n15867\n15868\n15869\n15870\n15871\n15872\n15873\n15874\n15875\n15876\n15877\n15878\n15879\n15880\n15881\n15882\n15883\n15884\n15885\n15886\n15887\n15888\n15889\n15890\n15891\n15892\n15893\n15894\n15895\n15896\n15897\n15898\n15899\n15900\n15901\n15902\n15903\n15904\n15905\n15906\n15907\n15908\n15909\n15910\n15911\n15912\n15913\n15914\n15915\n15916\n15917\n15918\n15919\n15920\n15921\n15922\n15923\n15924\n15925\n15926\n15927\n15928\n15929\n15930\n15931\n15932\n15933\n15934\n15935\n15936\n15937\n15938\n15939\n15940\n15941\n15942\n15943\n15944\n15945\n15946\n15947\n15948\n15949\n15950\n15951\n15952\n15953\n15954\n15955\n15956\n15957\n15958\n15959\n15960\n15961\n15962\n15963\n15964\n15965\n15966\n15967\n15968\n15969\n15970\n15971\n15972\n15973\n15974\n15975\n15976\n15977\n15978\n15979\n15980\n15981\n15982\n15983\n15984\n15985\n15986\n15987\n15988\n15989\n15990\n15991\n15992\n15993\n15994\n15995\n15996\n15997\n15998\n15999\n16000\n16001\n16002\n16003\n16004\n16005\n16006\n16007\n16008\n16009\n16010\n16011\n16012\n16013\n16014\n16015\n16016\n16017\n16018\n16019\n16020\n16021\n16022\n16023\n16024\n16025\n16026\n16027\n16028\n16029\n16030\n16031\n16032\n16033\n16034\n16035\n16036\n16037\n16038\n16039\n16040\n16041\n16042\n16043\n16044\n16045\n16046\n16047\n16048\n16049\n16050\n16051\n16052\n16053\n16054\n16055\n16056\n16057\n16058\n16059\n16060\n16061\n16062\n16063\n16064\n16065\n16066\n16067\n16068\n16069\n16070\n16071\n16072\n16073\n16074\n16075\n16076\n16077\n16078\n16079\n16080\n16081\n16082\n16083\n16084\n16085\n16086\n16087\n16088\n16089\n16090\n16091\n16092\n16093\n16094\n16095\n16096\n16097\n16098\n16099\n16100\n16101\n16102\n16103\n16104\n16105\n16106\n16107\n16108\n16109\n16110\n16111\n16112\n16113\n16114\n16115\n16116\n16117\n16118\n16119\n16120\n16121\n16122\n16123\n16124\n16125\n16126\n16127\n16128\n16129\n16130\n16131\n16132\n16133\n16134\n16135\n16136\n16137\n16138\n16139\n16140\n16141\n16142\n16143\n16144\n16145\n16146\n16147\n16148\n16149\n16150\n16151\n16152\n16153\n16154\n16155\n16156\n16157\n16158\n16159\n16160\n16161\n16162\n16163\n16164\n16165\n16166\n16167\n16168\n16169\n16170\n16171\n16172\n16173\n16174\n16175\n16176\n16177\n16178\n16179\n16180\n16181\n16182\n16183\n16184\n16185\n16186\n16187\n16188\n16189\n16190\n16191\n16192\n16193\n16194\n16195\n16196\n16197\n16198\n16199\n16200\n16201\n16202\n16203\n16204\n16205\n16206\n16207\n16208\n16209\n16210\n16211\n16212\n16213\n16214\n16215\n16216\n16217\n16218\n16219\n16220\n16221\n16222\n16223\n16224\n16225\n16226\n16227\n16228\n16229\n16230\n16231\n16232\n16233\n16234\n16235\n16236\n16237\n16238\n16239\n16240\n16241\n16242\n16243\n16244\n16245\n16246\n16247\n16248\n16249\n16250\n16251\n16252\n16253\n16254\n16255\n16256\n16257\n16258\n16259\n16260\n16261\n16262\n16263\n16264\n16265\n16266\n16267\n16268\n16269\n16270\n16271\n16272\n16273\n16274\n16275\n16276\n16277\n16278\n16279\n16280\n16281\n16282\n16283\n16284\n16285\n16286\n16287\n16288\n16289\n16290\n16291\n16292\n16293\n16294\n16295\n16296\n16297\n16298\n16299\n16300\n16301\n16302\n16303\n16304\n16305\n16306\n16307\n16308\n16309\n16310\n16311\n16312\n16313\n16314
\n16315\n16316\n16317\n16318\n16319\n16320\n16321\n16322\n16323\n16324\n16325\n16326\n16327\n16328\n16329\n16330\n16331\n16332\n16333\n16334\n16335\n16336\n16337\n16338\n16339\n16340\n16341\n16342\n16343\n16344\n16345\n16346\n16347\n16348\n16349\n16350\n16351\n16352\n16353\n16354\n16355\n16356\n16357\n16358\n16359\n16360\n16361\n16362\n16363\n16364\n16365\n16366\n16367\n16368\n16369\n16370\n16371\n16372\n16373\n16374\n16375\n16376\n16377\n16378\n16379\n16380\n16381\n16382\n16383\n16384\n16385\n16386\n16387\n16388\n16389\n16390\n16391\n16392\n16393\n16394\n16395\n16396\n16397\n16398\n16399\n16400\n16401\n16402\n16403\n16404\n16405\n16406\n16407\n16408\n16409\n16410\n16411\n16412\n16413\n16414\n16415\n16416\n16417\n16418\n16419\n16420\n16421\n16422\n16423\n16424\n16425\n16426\n16427\n16428\n16429\n16430\n16431\n16432\n16433\n16434\n16435\n16436\n16437\n16438\n16439\n16440\n16441\n16442\n16443\n16444\n16445\n16446\n16447\n16448\n16449\n16450\n16451\n16452\n16453\n16454\n16455\n16456\n16457\n16458\n16459\n16460\n16461\n16462\n16463\n16464\n16465\n16466\n16467\n16468\n16469\n16470\n16471\n16472\n16473\n16474\n16475\n16476\n16477\n16478\n16479\n16480\n16481\n16482\n16483\n16484\n16485\n16486\n16487\n16488\n16489\n16490\n16491\n16492\n16493\n16494\n16495\n16496\n16497\n16498\n16499\n16500\n16501\n16502\n16503\n16504\n16505\n16506\n16507\n16508\n16509\n16510\n16511\n16512\n16513\n16514\n16515\n16516\n16517\n16518\n16519\n16520\n16521\n16522\n16523\n16524\n16525\n16526\n16527\n16528\n16529\n16530\n16531\n16532\n16533\n16534\n16535\n16536\n16537\n16538\n16539\n16540\n16541\n16542\n16543\n16544\n16545\n16546\n16547\n16548\n16549\n16550\n16551\n16552\n16553\n16554\n16555\n16556\n16557\n16558\n16559\n16560\n16561\n16562\n16563\n16564\n16565\n16566\n16567\n16568\n16569\n16570\n16571\n16572\n16573\n16574\n16575\n16576\n16577\n16578\n16579\n16580\n16581\n16582\n16583\n16584\n16585\n16586\n16587\n16588\n16589\n16590\n16591\n16592\n16593\n16594\n16595\n16596\n16597\n16598\n16599\n16600\n16601\n16602\n16603\n16604\n16605\n16606\n16607\n16608\n16609\n16610\n16611\n16612\n16613\n16614\n16615\n16616\n16617\n16618\n16619\n16620\n16621\n16622\n16623\n16624\n16625\n16626\n16627\n16628\n16629\n16630\n16631\n16632\n16633\n16634\n16635\n16636\n16637\n16638\n16639\n16640\n16641\n16642\n16643\n16644\n16645\n16646\n16647\n16648\n16649\n16650\n16651\n16652\n16653\n16654\n16655\n16656\n16657\n16658\n16659\n16660\n16661\n16662\n16663\n16664\n16665\n16666\n16667\n16668\n16669\n16670\n16671\n16672\n16673\n16674\n16675\n16676\n16677\n16678\n16679\n16680\n16681\n16682\n16683\n16684\n16685\n16686\n16687\n16688\n16689\n16690\n16691\n16692\n16693\n16694\n16695\n16696\n16697\n16698\n16699\n16700\n16701\n16702\n16703\n16704\n16705\n16706\n16707\n16708\n16709\n16710\n16711\n16712\n16713\n16714\n16715\n16716\n16717\n16718\n16719\n16720\n16721\n16722\n16723\n16724\n16725\n16726\n16727\n16728\n16729\n16730\n16731\n16732\n16733\n16734\n16735\n16736\n16737\n16738\n16739\n16740\n16741\n16742\n16743\n16744\n16745\n16746\n16747\n16748\n16749\n16750\n16751\n16752\n16753\n16754\n16755\n16756\n16757\n16758\n16759\n16760\n16761\n16762\n16763\n16764\n16765\n16766\n16767\n16768\n16769\n16770\n16771\n16772\n16773\n16774\n16775\n16776\n16777\n16778\n16779\n16780\n16781\n16782\n16783\n16784\n16785\n16786\n16787\n16788\n16789\n16790\n16791\n16792\n16793\n16794\n16795\n16796\n16797\n16798\n16799\n16800\n16801\n16802\n16803\n16804\n16805\n16806\n16807\n16808\n16809\n16810\n16811\n16812\n16813\n16814\n16815\n16816\n16817\n16818\n16819\n16820\n16821\n1682
2\n16823\n16824\n16825\n16826\n16827\n16828\n16829\n16830\n16831\n16832\n16833\n16834\n16835\n16836\n16837\n16838\n16839\n16840\n16841\n16842\n16843\n16844\n16845\n16846\n16847\n16848\n16849\n16850\n16851\n16852\n16853\n16854\n16855\n16856\n16857\n16858\n16859\n16860\n16861\n16862\n16863\n16864\n16865\n16866\n16867\n16868\n16869\n16870\n16871\n16872\n16873\n16874\n16875\n16876\n16877\n16878\n16879\n16880\n16881\n16882\n16883\n16884\n16885\n16886\n16887\n16888\n16889\n16890\n16891\n16892\n16893\n16894\n16895\n16896\n16897\n16898\n16899\n16900\n16901\n16902\n16903\n16904\n16905\n16906\n16907\n16908\n16909\n16910\n16911\n16912\n16913\n16914\n16915\n16916\n16917\n16918\n16919\n16920\n16921\n16922\n16923\n16924\n16925\n16926\n16927\n16928\n16929\n16930\n16931\n16932\n16933\n16934\n16935\n16936\n16937\n16938\n16939\n16940\n16941\n16942\n16943\n16944\n16945\n16946\n16947\n16948\n16949\n16950\n16951\n16952\n16953\n16954\n16955\n16956\n16957\n16958\n16959\n16960\n16961\n16962\n16963\n16964\n16965\n16966\n16967\n16968\n16969\n16970\n16971\n16972\n16973\n16974\n16975\n16976\n16977\n16978\n16979\n16980\n16981\n16982\n16983\n16984\n16985\n16986\n16987\n16988\n16989\n16990\n16991\n16992\n16993\n16994\n16995\n16996\n16997\n16998\n16999\n17000\n17001\n17002\n17003\n17004\n17005\n17006\n17007\n17008\n17009\n17010\n17011\n17012\n17013\n17014\n17015\n17016\n17017\n17018\n17019\n17020\n17021\n17022\n17023\n17024\n17025\n17026\n17027\n17028\n17029\n17030\n17031\n17032\n17033\n17034\n17035\n17036\n17037\n17038\n17039\n17040\n17041\n17042\n17043\n17044\n17045\n17046\n17047\n17048\n17049\n17050\n17051\n17052\n17053\n17054\n17055\n17056\n17057\n17058\n17059\n17060\n17061\n17062\n17063\n17064\n17065\n17066\n17067\n17068\n17069\n17070\n17071\n17072\n17073\n17074\n17075\n17076\n17077\n17078\n17079\n17080\n17081\n17082\n17083\n17084\n17085\n17086\n17087\n17088\n17089\n17090\n17091\n17092\n17093\n17094\n17095\n17096\n17097\n17098\n17099\n17100\n17101\n17102\n17103\n17104\n17105\n17106\n17107\n17108\n17109\n17110\n17111\n17112\n17113\n17114\n17115\n17116\n17117\n17118\n17119\n17120\n17121\n17122\n17123\n17124\n17125\n17126\n17127\n17128\n17129\n17130\n17131\n17132\n17133\n17134\n17135\n17136\n17137\n17138\n17139\n17140\n17141\n17142\n17143\n17144\n17145\n17146\n17147\n17148\n17149\n17150\n17151\n17152\n17153\n17154\n17155\n17156\n17157\n17158\n17159\n17160\n17161\n17162\n17163\n17164\n17165\n17166\n17167\n17168\n17169\n17170\n17171\n17172\n17173\n17174\n17175\n17176\n17177\n17178\n17179\n17180\n17181\n17182\n17183\n17184\n17185\n17186\n17187\n17188\n17189\n17190\n17191\n17192\n17193\n17194\n17195\n17196\n17197\n17198\n17199\n17200\n17201\n17202\n17203\n17204\n17205\n17206\n17207\n17208\n17209\n17210\n17211\n17212\n17213\n17214\n17215\n17216\n17217\n17218\n17219\n17220\n17221\n17222\n17223\n17224\n17225\n17226\n17227\n17228\n17229\n17230\n17231\n17232\n17233\n17234\n17235\n17236\n17237\n17238\n17239\n17240\n17241\n17242\n17243\n17244\n17245\n17246\n17247\n17248\n17249\n17250\n17251\n17252\n17253\n17254\n17255\n17256\n17257\n17258\n17259\n17260\n17261\n17262\n17263\n17264\n17265\n17266\n17267\n17268\n17269\n17270\n17271\n17272\n17273\n17274\n17275\n17276\n17277\n17278\n17279\n17280\n17281\n17282\n17283\n17284\n17285\n17286\n17287\n17288\n17289\n17290\n17291\n17292\n17293\n17294\n17295\n17296\n17297\n17298\n17299\n17300\n17301\n17302\n17303\n17304\n17305\n17306\n17307\n17308\n17309\n17310\n17311\n17312\n17313\n17314\n17315\n17316\n17317\n17318\n17319\n17320\n17321\n17322\n17323\n17324\n17325\n17326\n17327\n17328\n17329\n173
30\n17331\n17332\n17333\n17334\n17335\n17336\n17337\n17338\n17339\n17340\n17341\n17342\n17343\n17344\n17345\n17346\n17347\n17348\n17349\n17350\n17351\n17352\n17353\n17354\n17355\n17356\n17357\n17358\n17359\n17360\n17361\n17362\n17363\n17364\n17365\n17366\n17367\n17368\n17369\n17370\n17371\n17372\n17373\n17374\n17375\n17376\n17377\n17378\n17379\n17380\n17381\n17382\n17383\n17384\n17385\n17386\n17387\n17388\n17389\n17390\n17391\n17392\n17393\n17394\n17395\n17396\n17397\n17398\n17399\n17400\n17401\n17402\n17403\n17404\n17405\n17406\n17407\n17408\n17409\n17410\n17411\n17412\n17413\n17414\n17415\n17416\n17417\n17418\n17419\n17420\n17421\n17422\n17423\n17424\n17425\n17426\n17427\n17428\n17429\n17430\n17431\n17432\n17433\n17434\n17435\n17436\n17437\n17438\n17439\n17440\n17441\n17442\n17443\n17444\n17445\n17446\n17447\n17448\n17449\n17450\n17451\n17452\n17453\n17454\n17455\n17456\n17457\n17458\n17459\n17460\n17461\n17462\n17463\n17464\n17465\n17466\n17467\n17468\n17469\n17470\n17471\n17472\n17473\n17474\n17475\n17476\n17477\n17478\n17479\n17480\n17481\n17482\n17483\n17484\n17485\n17486\n17487\n17488\n17489\n17490\n17491\n17492\n17493\n17494\n17495\n17496\n17497\n17498\n17499\n17500\n17501\n17502\n17503\n17504\n17505\n17506\n17507\n17508\n17509\n17510\n17511\n17512\n17513\n17514\n17515\n17516\n17517\n17518\n17519\n17520\n17521\n17522\n17523\n17524\n17525\n17526\n17527\n17528\n17529\n17530\n17531\n17532\n17533\n17534\n17535\n17536\n17537\n17538\n17539\n17540\n17541\n17542\n17543\n17544\n17545\n17546\n17547\n17548\n17549\n17550\n17551\n17552\n17553\n17554\n17555\n17556\n17557\n17558\n17559\n17560\n17561\n17562\n17563\n17564\n17565\n17566\n17567\n17568\n17569\n17570\n17571\n17572\n17573\n17574\n17575\n17576\n17577\n17578\n17579\n17580\n17581\n17582\n17583\n17584\n17585\n17586\n17587\n17588\n17589\n17590\n17591\n17592\n17593\n17594\n17595\n17596\n17597\n17598\n17599\n17600\n17601\n17602\n17603\n17604\n17605\n17606\n17607\n17608\n17609\n17610\n17611\n17612\n17613\n17614\n17615\n17616\n17617\n17618\n17619\n17620\n17621\n17622\n17623\n17624\n17625\n17626\n17627\n17628\n17629\n17630\n17631\n17632\n17633\n17634\n17635\n17636\n17637\n17638\n17639\n17640\n17641\n17642\n17643\n17644\n17645\n17646\n17647\n17648\n17649\n17650\n17651\n17652\n17653\n17654\n17655\n17656\n17657\n17658\n17659\n17660\n17661\n17662\n17663\n17664\n17665\n17666\n17667\n17668\n17669\n17670\n17671\n17672\n17673\n17674\n17675\n17676\n17677\n17678\n17679\n17680\n17681\n17682\n17683\n17684\n17685\n17686\n17687\n17688\n17689\n17690\n17691\n17692\n17693\n17694\n17695\n17696\n17697\n17698\n17699\n17700\n17701\n17702\n17703\n17704\n17705\n17706\n17707\n17708\n17709\n17710\n17711\n17712\n17713\n17714\n17715\n17716\n17717\n17718\n17719\n17720\n17721\n17722\n17723\n17724\n17725\n17726\n17727\n17728\n17729\n17730\n17731\n17732\n17733\n17734\n17735\n17736\n17737\n17738\n17739\n17740\n17741\n17742\n17743\n17744\n17745\n17746\n17747\n17748\n17749\n17750\n17751\n17752\n17753\n17754\n17755\n17756\n17757\n17758\n17759\n17760\n17761\n17762\n17763\n17764\n17765\n17766\n17767\n17768\n17769\n17770\n17771\n17772\n17773\n17774\n17775\n17776\n17777\n17778\n17779\n17780\n17781\n17782\n17783\n17784\n17785\n17786\n17787\n17788\n17789\n17790\n17791\n17792\n17793\n17794\n17795\n17796\n17797\n17798\n17799\n17800\n17801\n17802\n17803\n17804\n17805\n17806\n17807\n17808\n17809\n17810\n17811\n17812\n17813\n17814\n17815\n17816\n17817\n17818\n17819\n17820\n17821\n17822\n17823\n17824\n17825\n17826\n17827\n17828\n17829\n17830\n17831\n17832\n17833\n17834\n17835\n17836\n17837\n17
838\n17839\n17840\n17841\n17842\n17843\n17844\n17845\n17846\n17847\n17848\n17849\n17850\n17851\n17852\n17853\n17854\n17855\n17856\n17857\n17858\n17859\n17860\n17861\n17862\n17863\n17864\n17865\n17866\n17867\n17868\n17869\n17870\n17871\n17872\n17873\n17874\n17875\n17876\n17877\n17878\n17879\n17880\n17881\n17882\n17883\n17884\n17885\n17886\n17887\n17888\n17889\n17890\n17891\n17892\n17893\n17894\n17895\n17896\n17897\n17898\n17899\n17900\n17901\n17902\n17903\n17904\n17905\n17906\n17907\n17908\n17909\n17910\n17911\n17912\n17913\n17914\n17915\n17916\n17917\n17918\n17919\n17920\n17921\n17922\n17923\n17924\n17925\n17926\n17927\n17928\n17929\n17930\n17931\n17932\n17933\n17934\n17935\n17936\n17937\n17938\n17939\n17940\n17941\n17942\n17943\n17944\n17945\n17946\n17947\n17948\n17949\n17950\n17951\n17952\n17953\n17954\n17955\n17956\n17957\n17958\n17959\n17960\n17961\n17962\n17963\n17964\n17965\n17966\n17967\n17968\n17969\n17970\n17971\n17972\n17973\n17974\n17975\n17976\n17977\n17978\n17979\n17980\n17981\n17982\n17983\n17984\n17985\n17986\n17987\n17988\n17989\n17990\n17991\n17992\n17993\n17994\n17995\n17996\n17997\n17998\n17999\n18000\n18001\n18002\n18003\n18004\n18005\n18006\n18007\n18008\n18009\n18010\n18011\n18012\n18013\n18014\n18015\n18016\n18017\n18018\n18019\n18020\n18021\n18022\n18023\n18024\n18025\n18026\n18027\n18028\n18029\n18030\n18031\n18032\n18033\n18034\n18035\n18036\n18037\n18038\n18039\n18040\n18041\n18042\n18043\n18044\n18045\n18046\n18047\n18048\n18049\n18050\n18051\n18052\n18053\n18054\n18055\n18056\n18057\n18058\n18059\n18060\n18061\n18062\n18063\n18064\n18065\n18066\n18067\n18068\n18069\n18070\n18071\n18072\n18073\n18074\n18075\n18076\n18077\n18078\n18079\n18080\n18081\n18082\n18083\n18084\n18085\n18086\n18087\n18088\n18089\n18090\n18091\n18092\n18093\n18094\n18095\n18096\n18097\n18098\n18099\n18100\n18101\n18102\n18103\n18104\n18105\n18106\n18107\n18108\n18109\n18110\n18111\n18112\n18113\n18114\n18115\n18116\n18117\n18118\n18119\n18120\n18121\n18122\n18123\n18124\n18125\n18126\n18127\n18128\n18129\n18130\n18131\n18132\n18133\n18134\n18135\n18136\n18137\n18138\n18139\n18140\n18141\n18142\n18143\n18144\n18145\n18146\n18147\n18148\n18149\n18150\n18151\n18152\n18153\n18154\n18155\n18156\n18157\n18158\n18159\n18160\n18161\n18162\n18163\n18164\n18165\n18166\n18167\n18168\n18169\n18170\n18171\n18172\n18173\n18174\n18175\n18176\n18177\n18178\n18179\n18180\n18181\n18182\n18183\n18184\n18185\n18186\n18187\n18188\n18189\n18190\n18191\n18192\n18193\n18194\n18195\n18196\n18197\n18198\n18199\n18200\n18201\n18202\n18203\n18204\n18205\n18206\n18207\n18208\n18209\n18210\n18211\n18212\n18213\n18214\n18215\n18216\n18217\n18218\n18219\n18220\n18221\n18222\n18223\n18224\n18225\n18226\n18227\n18228\n18229\n18230\n18231\n18232\n18233\n18234\n18235\n18236\n18237\n18238\n18239\n18240\n18241\n18242\n18243\n18244\n18245\n18246\n18247\n18248\n18249\n18250\n18251\n18252\n18253\n18254\n18255\n18256\n18257\n18258\n18259\n18260\n18261\n18262\n18263\n18264\n18265\n18266\n18267\n18268\n18269\n18270\n18271\n18272\n18273\n18274\n18275\n18276\n18277\n18278\n18279\n18280\n18281\n18282\n18283\n18284\n18285\n18286\n18287\n18288\n18289\n18290\n18291\n18292\n18293\n18294\n18295\n18296\n18297\n18298\n18299\n18300\n18301\n18302\n18303\n18304\n18305\n18306\n18307\n18308\n18309\n18310\n18311\n18312\n18313\n18314\n18315\n18316\n18317\n18318\n18319\n18320\n18321\n18322\n18323\n18324\n18325\n18326\n18327\n18328\n18329\n18330\n18331\n18332\n18333\n18334\n18335\n18336\n18337\n18338\n18339\n18340\n18341\n18342\n18343\n18344\n18345\n1
8346\n18347\n18348\n18349\n18350\n18351\n18352\n18353\n18354\n18355\n18356\n18357\n18358\n18359\n18360\n18361\n18362\n18363\n18364\n18365\n18366\n18367\n18368\n18369\n18370\n18371\n18372\n18373\n18374\n18375\n18376\n18377\n18378\n18379\n18380\n18381\n18382\n18383\n18384\n18385\n18386\n18387\n18388\n18389\n18390\n18391\n18392\n18393\n18394\n18395\n18396\n18397\n18398\n18399\n18400\n18401\n18402\n18403\n18404\n18405\n18406\n18407\n18408\n18409\n18410\n18411\n18412\n18413\n18414\n18415\n18416\n18417\n18418\n18419\n18420\n18421\n18422\n18423\n18424\n18425\n18426\n18427\n18428\n18429\n18430\n18431\n18432\n18433\n18434\n18435\n18436\n18437\n18438\n18439\n18440\n18441\n18442\n18443\n18444\n18445\n18446\n18447\n18448\n18449\n18450\n18451\n18452\n18453\n18454\n18455\n18456\n18457\n18458\n18459\n18460\n18461\n18462\n18463\n18464\n18465\n18466\n18467\n18468\n18469\n18470\n18471\n18472\n18473\n18474\n18475\n18476\n18477\n18478\n18479\n18480\n18481\n18482\n18483\n18484\n18485\n18486\n18487\n18488\n18489\n18490\n18491\n18492\n18493\n18494\n18495\n18496\n18497\n18498\n18499\n18500\n18501\n18502\n18503\n18504\n18505\n18506\n18507\n18508\n18509\n18510\n18511\n18512\n18513\n18514\n18515\n18516\n18517\n18518\n18519\n18520\n18521\n18522\n18523\n18524\n18525\n18526\n18527\n18528\n18529\n18530\n18531\n18532\n18533\n18534\n18535\n18536\n18537\n18538\n18539\n18540\n18541\n18542\n18543\n18544\n18545\n18546\n18547\n18548\n18549\n18550\n18551\n18552\n18553\n18554\n18555\n18556\n18557\n18558\n18559\n18560\n18561\n18562\n18563\n18564\n18565\n18566\n18567\n18568\n18569\n18570\n18571\n18572\n18573\n18574\n18575\n18576\n18577\n18578\n18579\n18580\n18581\n18582\n18583\n18584\n18585\n18586\n18587\n18588\n18589\n18590\n18591\n18592\n18593\n18594\n18595\n18596\n18597\n18598\n18599\n18600\n18601\n18602\n18603\n18604\n18605\n18606\n18607\n18608\n18609\n18610\n18611\n18612\n18613\n18614\n18615\n18616\n18617\n18618\n18619\n18620\n18621\n18622\n18623\n18624\n18625\n18626\n18627\n18628\n18629\n18630\n18631\n18632\n18633\n18634\n18635\n18636\n18637\n18638\n18639\n18640\n18641\n18642\n18643\n18644\n18645\n18646\n18647\n18648\n18649\n18650\n18651\n18652\n18653\n18654\n18655\n18656\n18657\n18658\n18659\n18660\n18661\n18662\n18663\n18664\n18665\n18666\n18667\n18668\n18669\n18670\n18671\n18672\n18673\n18674\n18675\n18676\n18677\n18678\n18679\n18680\n18681\n18682\n18683\n18684\n18685\n18686\n18687\n18688\n18689\n18690\n18691\n18692\n18693\n18694\n18695\n18696\n18697\n18698\n18699\n18700\n18701\n18702\n18703\n18704\n18705\n18706\n18707\n18708\n18709\n18710\n18711\n18712\n18713\n18714\n18715\n18716\n18717\n18718\n18719\n18720\n18721\n18722\n18723\n18724\n18725\n18726\n18727\n18728\n18729\n18730\n18731\n18732\n18733\n18734\n18735\n18736\n18737\n18738\n18739\n18740\n18741\n18742\n18743\n18744\n18745\n18746\n18747\n18748\n18749\n18750\n18751\n18752\n18753\n18754\n18755\n18756\n18757\n18758\n18759\n18760\n18761\n18762\n18763\n18764\n18765\n18766\n18767\n18768\n18769\n18770\n18771\n18772\n18773\n18774\n18775\n18776\n18777\n18778\n18779\n18780\n18781\n18782\n18783\n18784\n18785\n18786\n18787\n18788\n18789\n18790\n18791\n18792\n18793\n18794\n18795\n18796\n18797\n18798\n18799\n18800\n18801\n18802\n18803\n18804\n18805\n18806\n18807\n18808\n18809\n18810\n18811\n18812\n18813\n18814\n18815\n18816\n18817\n18818\n18819\n18820\n18821\n18822\n18823\n18824\n18825\n18826\n18827\n18828\n18829\n18830\n18831\n18832\n18833\n18834\n18835\n18836\n18837\n18838\n18839\n18840\n18841\n18842\n18843\n18844\n18845\n18846\n18847\n18848\n18849\n18850\n18851\n18852\n18853\n
18854\n18855\n18856\n18857\n18858\n18859\n18860\n18861\n18862\n18863\n18864\n18865\n18866\n18867\n18868\n18869\n18870\n18871\n18872\n18873\n18874\n18875\n18876\n18877\n18878\n18879\n18880\n18881\n18882\n18883\n18884\n18885\n18886\n18887\n18888\n18889\n18890\n18891\n18892\n18893\n18894\n18895\n18896\n18897\n18898\n18899\n18900\n18901\n18902\n18903\n18904\n18905\n18906\n18907\n18908\n18909\n18910\n18911\n18912\n18913\n18914\n18915\n18916\n18917\n18918\n18919\n18920\n18921\n18922\n18923\n18924\n18925\n18926\n18927\n18928\n18929\n18930\n18931\n18932\n18933\n18934\n18935\n18936\n18937\n18938\n18939\n18940\n18941\n18942\n18943\n18944\n18945\n18946\n18947\n18948\n18949\n18950\n18951\n18952\n18953\n18954\n18955\n18956\n18957\n18958\n18959\n18960\n18961\n18962\n18963\n18964\n18965\n18966\n18967\n18968\n18969\n18970\n18971\n18972\n18973\n18974\n18975\n18976\n18977\n18978\n18979\n18980\n18981\n18982\n18983\n18984\n18985\n18986\n18987\n18988\n18989\n18990\n18991\n18992\n18993\n18994\n18995\n18996\n18997\n18998\n18999\n19000\n19001\n19002\n19003\n19004\n19005\n19006\n19007\n19008\n19009\n19010\n19011\n19012\n19013\n19014\n19015\n19016\n19017\n19018\n19019\n19020\n19021\n19022\n19023\n19024\n19025\n19026\n19027\n19028\n19029\n19030\n19031\n19032\n19033\n19034\n19035\n19036\n19037\n19038\n19039\n19040\n19041\n19042\n19043\n19044\n19045\n19046\n19047\n19048\n19049\n19050\n19051\n19052\n19053\n19054\n19055\n19056\n19057\n19058\n19059\n19060\n19061\n19062\n19063\n19064\n19065\n19066\n19067\n19068\n19069\n19070\n19071\n19072\n19073\n19074\n19075\n19076\n19077\n19078\n19079\n19080\n19081\n19082\n19083\n19084\n19085\n19086\n19087\n19088\n19089\n19090\n19091\n19092\n19093\n19094\n19095\n19096\n19097\n19098\n19099\n19100\n19101\n19102\n19103\n19104\n19105\n19106\n19107\n19108\n19109\n19110\n19111\n19112\n19113\n19114\n19115\n19116\n19117\n19118\n19119\n19120\n19121\n19122\n19123\n19124\n19125\n19126\n19127\n19128\n19129\n19130\n19131\n19132\n19133\n19134\n19135\n19136\n19137\n19138\n19139\n19140\n19141\n19142\n19143\n19144\n19145\n19146\n19147\n19148\n19149\n19150\n19151\n19152\n19153\n19154\n19155\n19156\n19157\n19158\n19159\n19160\n19161\n19162\n19163\n19164\n19165\n19166\n19167\n19168\n19169\n19170\n19171\n19172\n19173\n19174\n19175\n19176\n19177\n19178\n19179\n19180\n19181\n19182\n19183\n19184\n19185\n19186\n19187\n19188\n19189\n19190\n19191\n19192\n19193\n19194\n19195\n19196\n19197\n19198\n19199\n19200\n19201\n19202\n19203\n19204\n19205\n19206\n19207\n19208\n19209\n19210\n19211\n19212\n19213\n19214\n19215\n19216\n19217\n19218\n19219\n19220\n19221\n19222\n19223\n19224\n19225\n19226\n19227\n19228\n19229\n19230\n19231\n19232\n19233\n19234\n19235\n19236\n19237\n19238\n19239\n19240\n19241\n19242\n19243\n19244\n19245\n19246\n19247\n19248\n19249\n19250\n19251\n19252\n19253\n19254\n19255\n19256\n19257\n19258\n19259\n19260\n19261\n19262\n19263\n19264\n19265\n19266\n19267\n19268\n19269\n19270\n19271\n19272\n19273\n19274\n19275\n19276\n19277\n19278\n19279\n19280\n19281\n19282\n19283\n19284\n19285\n19286\n19287\n19288\n19289\n19290\n19291\n19292\n19293\n19294\n19295\n19296\n19297\n19298\n19299\n19300\n19301\n19302\n19303\n19304\n19305\n19306\n19307\n19308\n19309\n19310\n19311\n19312\n19313\n19314\n19315\n19316\n19317\n19318\n19319\n19320\n19321\n19322\n19323\n19324\n19325\n19326\n19327\n19328\n19329\n19330\n19331\n19332\n19333\n19334\n19335\n19336\n19337\n19338\n19339\n19340\n19341\n19342\n19343\n19344\n19345\n19346\n19347\n19348\n19349\n19350\n19351\n19352\n19353\n19354\n19355\n19356\n19357\n19358\n19359\n19360\n19361\
n19362\n19363\n19364\n19365\n19366\n19367\n19368\n19369\n19370\n19371\n19372\n19373\n19374\n19375\n19376\n19377\n19378\n19379\n19380\n19381\n19382\n19383\n19384\n19385\n19386\n19387\n19388\n19389\n19390\n19391\n19392\n19393\n19394\n19395\n19396\n19397\n19398\n19399\n19400\n19401\n19402\n19403\n19404\n19405\n19406\n19407\n19408\n19409\n19410\n19411\n19412\n19413\n19414\n19415\n19416\n19417\n19418\n19419\n19420\n19421\n19422\n19423\n19424\n19425\n19426\n19427\n19428\n19429\n19430\n19431\n19432\n19433\n19434\n19435\n19436\n19437\n19438\n19439\n19440\n19441\n19442\n19443\n19444\n19445\n19446\n19447\n19448\n19449\n19450\n19451\n19452\n19453\n19454\n19455\n19456\n19457\n19458\n19459\n19460\n19461\n19462\n19463\n19464\n19465\n19466\n19467\n19468\n19469\n19470\n19471\n19472\n19473\n19474\n19475\n19476\n19477\n19478\n19479\n19480\n19481\n19482\n19483\n19484\n19485\n19486\n19487\n19488\n19489\n19490\n19491\n19492\n19493\n19494\n19495\n19496\n19497\n19498\n19499\n19500\n19501\n19502\n19503\n19504\n19505\n19506\n19507\n19508\n19509\n19510\n19511\n19512\n19513\n19514\n19515\n19516\n19517\n19518\n19519\n19520\n19521\n19522\n19523\n19524\n19525\n19526\n19527\n19528\n19529\n19530\n19531\n19532\n19533\n19534\n19535\n19536\n19537\n19538\n19539\n19540\n19541\n19542\n19543\n19544\n19545\n19546\n19547\n19548\n19549\n19550\n19551\n19552\n19553\n19554\n19555\n19556\n19557\n19558\n19559\n19560\n19561\n19562\n19563\n19564\n19565\n19566\n19567\n19568\n19569\n19570\n19571\n19572\n19573\n19574\n19575\n19576\n19577\n19578\n19579\n19580\n19581\n19582\n19583\n19584\n19585\n19586\n19587\n19588\n19589\n19590\n19591\n19592\n19593\n19594\n19595\n19596\n19597\n19598\n19599\n19600\n19601\n19602\n19603\n19604\n19605\n19606\n19607\n19608\n19609\n19610\n19611\n19612\n19613\n19614\n19615\n19616\n19617\n19618\n19619\n19620\n19621\n19622\n19623\n19624\n19625\n19626\n19627\n19628\n19629\n19630\n19631\n19632\n19633\n19634\n19635\n19636\n19637\n19638\n19639\n19640\n19641\n19642\n19643\n19644\n19645\n19646\n19647\n19648\n19649\n19650\n19651\n19652\n19653\n19654\n19655\n19656\n19657\n19658\n19659\n19660\n19661\n19662\n19663\n19664\n19665\n19666\n19667\n19668\n19669\n19670\n19671\n19672\n19673\n19674\n19675\n19676\n19677\n19678\n19679\n19680\n19681\n19682\n19683\n19684\n19685\n19686\n19687\n19688\n19689\n19690\n19691\n19692\n19693\n19694\n19695\n19696\n19697\n19698\n19699\n19700\n19701\n19702\n19703\n19704\n19705\n19706\n19707\n19708\n19709\n19710\n19711\n19712\n19713\n19714\n19715\n19716\n19717\n19718\n19719\n19720\n19721\n19722\n19723\n19724\n19725\n19726\n19727\n19728\n19729\n19730\n19731\n19732\n19733\n19734\n19735\n19736\n19737\n19738\n19739\n19740\n19741\n19742\n19743\n19744\n19745\n19746\n19747\n19748\n19749\n19750\n19751\n19752\n19753\n19754\n19755\n19756\n19757\n19758\n19759\n19760\n19761\n19762\n19763\n19764\n19765\n19766\n19767\n19768\n19769\n19770\n19771\n19772\n19773\n19774\n19775\n19776\n19777\n19778\n19779\n19780\n19781\n19782\n19783\n19784\n19785\n19786\n19787\n19788\n19789\n19790\n19791\n19792\n19793\n19794\n19795\n19796\n19797\n19798\n19799\n19800\n19801\n19802\n19803\n19804\n19805\n19806\n19807\n19808\n19809\n19810\n19811\n19812\n19813\n19814\n19815\n19816\n19817\n19818\n19819\n19820\n19821\n19822\n19823\n19824\n19825\n19826\n19827\n19828\n19829\n19830\n19831\n19832\n19833\n19834\n19835\n19836\n19837\n19838\n19839\n19840\n19841\n19842\n19843\n19844\n19845\n19846\n19847\n19848\n19849\n19850\n19851\n19852\n19853\n19854\n19855\n19856\n19857\n19858\n19859\n19860\n19861\n19862\n19863\n19864\n19865\n19866\n19867\n19868\n19869
\n19870\n19871\n19872\n19873\n19874\n19875\n19876\n19877\n19878\n19879\n19880\n19881\n19882\n19883\n19884\n19885\n19886\n19887\n19888\n19889\n19890\n19891\n19892\n19893\n19894\n19895\n19896\n19897\n19898\n19899\n19900\n19901\n19902\n19903\n19904\n19905\n19906\n19907\n19908\n19909\n19910\n19911\n19912\n19913\n19914\n19915\n19916\n19917\n19918\n19919\n19920\n19921\n19922\n19923\n19924\n19925\n19926\n19927\n19928\n19929\n19930\n19931\n19932\n19933\n19934\n19935\n19936\n19937\n19938\n19939\n19940\n19941\n19942\n19943\n19944\n19945\n19946\n19947\n19948\n19949\n19950\n19951\n19952\n19953\n19954\n19955\n19956\n19957\n19958\n19959\n19960\n19961\n19962\n19963\n19964\n19965\n19966\n19967\n19968\n19969\n19970\n19971\n19972\n19973\n19974\n19975\n19976\n19977\n19978\n19979\n19980\n19981\n19982\n19983\n19984\n19985\n19986\n19987\n19988\n19989\n19990\n19991\n19992\n19993\n19994\n19995\n19996\n19997\n19998\n19999\n20000\n20001\n20002\n20003\n20004\n20005\n20006\n20007\n20008\n20009\n20010\n20011\n20012\n20013\n20014\n20015\n20016\n20017\n20018\n20019\n20020\n20021\n20022\n20023\n20024\n20025\n20026\n20027\n20028\n20029\n20030\n20031\n20032\n20033\n20034\n20035\n20036\n20037\n20038\n20039\n20040\n20041\n20042\n20043\n20044\n20045\n20046\n20047\n20048\n20049\n20050\n20051\n20052\n20053\n20054\n20055\n20056\n20057\n20058\n20059\n20060\n20061\n20062\n20063\n20064\n20065\n20066\n20067\n20068\n20069\n20070\n20071\n20072\n20073\n20074\n20075\n20076\n20077\n20078\n20079\n20080\n20081\n20082\n20083\n20084\n20085\n20086\n20087\n20088\n20089\n20090\n20091\n20092\n20093\n20094\n20095\n20096\n20097\n20098\n20099\n20100\n20101\n20102\n20103\n20104\n20105\n20106\n20107\n20108\n20109\n20110\n20111\n20112\n20113\n20114\n20115\n20116\n20117\n20118\n20119\n20120\n20121\n20122\n20123\n20124\n20125\n20126\n20127\n20128\n20129\n20130\n20131\n20132\n20133\n20134\n20135\n20136\n20137\n20138\n20139\n20140\n20141\n20142\n20143\n20144\n20145\n20146\n20147\n20148\n20149\n20150\n20151\n20152\n20153\n20154\n20155\n20156\n20157\n20158\n20159\n20160\n20161\n20162\n20163\n20164\n20165\n20166\n20167\n20168\n20169\n20170\n20171\n20172\n20173\n20174\n20175\n20176\n20177\n20178\n20179\n20180\n20181\n20182\n20183\n20184\n20185\n20186\n20187\n20188\n20189\n20190\n20191\n20192\n20193\n20194\n20195\n20196\n20197\n20198\n20199\n20200\n20201\n20202\n20203\n20204\n20205\n20206\n20207\n20208\n20209\n20210\n20211\n20212\n20213\n20214\n20215\n20216\n20217\n20218\n20219\n20220\n20221\n20222\n20223\n20224\n20225\n20226\n20227\n20228\n20229\n20230\n20231\n20232\n20233\n20234\n20235\n20236\n20237\n20238\n20239\n20240\n20241\n20242\n20243\n20244\n20245\n20246\n20247\n20248\n20249\n20250\n20251\n20252\n20253\n20254\n20255\n20256\n20257\n20258\n20259\n20260\n20261\n20262\n20263\n20264\n20265\n20266\n20267\n20268\n20269\n20270\n20271\n20272\n20273\n20274\n20275\n20276\n20277\n20278\n20279\n20280\n20281\n20282\n20283\n20284\n20285\n20286\n20287\n20288\n20289\n20290\n20291\n20292\n20293\n20294\n20295\n20296\n20297\n20298\n20299\n20300\n20301\n20302\n20303\n20304\n20305\n20306\n20307\n20308\n20309\n20310\n20311\n20312\n20313\n20314\n20315\n20316\n20317\n20318\n20319\n20320\n20321\n20322\n20323\n20324\n20325\n20326\n20327\n20328\n20329\n20330\n20331\n20332\n20333\n20334\n20335\n20336\n20337\n20338\n20339\n20340\n20341\n20342\n20343\n20344\n20345\n20346\n20347\n20348\n20349\n20350\n20351\n20352\n20353\n20354\n20355\n20356\n20357\n20358\n20359\n20360\n20361\n20362\n20363\n20364\n20365\n20366\n20367\n20368\n20369\n20370\n20371\n20372\n20373\n20374\n20375\n20376\n2037
7\n20378\n20379\n20380\n20381\n20382\n20383\n20384\n20385\n20386\n20387\n20388\n20389\n20390\n20391\n20392\n20393\n20394\n20395\n20396\n20397\n20398\n20399\n20400\n20401\n20402\n20403\n20404\n20405\n20406\n20407\n20408\n20409\n20410\n20411\n20412\n20413\n20414\n20415\n20416\n20417\n20418\n20419\n20420\n20421\n20422\n20423\n20424\n20425\n20426\n20427\n20428\n20429\n20430\n20431\n20432\n20433\n20434\n20435\n20436\n20437\n20438\n20439\n20440\n20441\n20442\n20443\n20444\n20445\n20446\n20447\n20448\n20449\n20450\n20451\n20452\n20453\n20454\n20455\n20456\n20457\n20458\n20459\n20460\n20461\n20462\n20463\n20464\n20465\n20466\n20467\n20468\n20469\n20470\n20471\n20472\n20473\n20474\n20475\n20476\n20477\n20478\n20479\n20480\n20481\n20482\n20483\n20484\n20485\n20486\n20487\n20488\n20489\n20490\n20491\n20492\n20493\n20494\n20495\n20496\n20497\n20498\n20499\n20500\n20501\n20502\n20503\n20504\n20505\n20506\n20507\n20508\n20509\n20510\n20511\n20512\n20513\n20514\n20515\n20516\n20517\n20518\n20519\n20520\n20521\n20522\n20523\n20524\n20525\n20526\n20527\n20528\n20529\n20530\n20531\n20532\n20533\n20534\n20535\n20536\n20537\n20538\n20539\n20540\n20541\n20542\n20543\n20544\n20545\n20546\n20547\n20548\n20549\n20550\n20551\n20552\n20553\n20554\n20555\n20556\n20557\n20558\n20559\n20560\n20561\n20562\n20563\n20564\n20565\n20566\n20567\n20568\n20569\n20570\n20571\n20572\n20573\n20574\n20575\n20576\n20577\n20578\n20579\n20580\n20581\n20582\n20583\n20584\n20585\n20586\n20587\n20588\n20589\n20590\n20591\n20592\n20593\n20594\n20595\n20596\n20597\n20598\n20599\n20600\n20601\n20602\n20603\n20604\n20605\n20606\n20607\n20608\n20609\n20610\n20611\n20612\n20613\n20614\n20615\n20616\n20617\n20618\n20619\n20620\n20621\n20622\n20623\n20624\n20625\n20626\n20627\n20628\n20629\n20630\n20631\n20632\n20633\n20634\n20635\n20636\n20637\n20638\n20639\n20640\n20641\n20642\n20643\n20644\n20645\n20646\n20647\n20648\n20649\n20650\n20651\n20652\n20653\n20654\n20655\n20656\n20657\n20658\n20659\n20660\n20661\n20662\n20663\n20664\n20665\n20666\n20667\n20668\n20669\n20670\n20671\n20672\n20673\n20674\n20675\n20676\n20677\n20678\n20679\n20680\n20681\n20682\n20683\n20684\n20685\n20686\n20687\n20688\n20689\n20690\n20691\n20692\n20693\n20694\n20695\n20696\n20697\n20698\n20699\n20700\n20701\n20702\n20703\n20704\n20705\n20706\n20707\n20708\n20709\n20710\n20711\n20712\n20713\n20714\n20715\n20716\n20717\n20718\n20719\n20720\n20721\n20722\n20723\n20724\n20725\n20726\n20727\n20728\n20729\n20730\n20731\n20732\n20733\n20734\n20735\n20736\n20737\n20738\n20739\n20740\n20741\n20742\n20743\n20744\n20745\n20746\n20747\n20748\n20749\n20750\n20751\n20752\n20753\n20754\n20755\n20756\n20757\n20758\n20759\n20760\n20761\n20762\n20763\n20764\n20765\n20766\n20767\n20768\n20769\n20770\n20771\n20772\n20773\n20774\n20775\n20776\n20777\n20778\n20779\n20780\n20781\n20782\n20783\n20784\n20785\n20786\n20787\n20788\n20789\n20790\n20791\n20792\n20793\n20794\n20795\n20796\n20797\n20798\n20799\n20800\n20801\n20802\n20803\n20804\n20805\n20806\n20807\n20808\n20809\n20810\n20811\n20812\n20813\n20814\n20815\n20816\n20817\n20818\n20819\n20820\n20821\n20822\n20823\n20824\n20825\n20826\n20827\n20828\n20829\n20830\n20831\n20832\n20833\n20834\n20835\n20836\n20837\n20838\n20839\n20840\n20841\n20842\n20843\n20844\n20845\n20846\n20847\n20848\n20849\n20850\n20851\n20852\n20853\n20854\n20855\n20856\n20857\n20858\n20859\n20860\n20861\n20862\n20863\n20864\n20865\n20866\n20867\n20868\n20869\n20870\n20871\n20872\n20873\n20874\n20875\n20876\n20877\n20878\n20879\n20880\n20881\n20882\n20883\n20884\n208
85\n20886\n20887\n20888\n20889\n20890\n20891\n20892\n20893\n20894\n20895\n20896\n20897\n20898\n20899\n20900\n20901\n20902\n20903\n20904\n20905\n20906\n20907\n20908\n20909\n20910\n20911\n20912\n20913\n20914\n20915\n20916\n20917\n20918\n20919\n20920\n20921\n20922\n20923\n20924\n20925\n20926\n20927\n20928\n20929\n20930\n20931\n20932\n20933\n20934\n20935\n20936\n20937\n20938\n20939\n20940\n20941\n20942\n20943\n20944\n20945\n20946\n20947\n20948\n20949\n20950\n20951\n20952\n20953\n20954\n20955\n20956\n20957\n20958\n20959\n20960\n20961\n20962\n20963\n20964\n20965\n20966\n20967\n20968\n20969\n20970\n20971\n20972\n20973\n20974\n20975\n20976\n20977\n20978\n20979\n20980\n20981\n20982\n20983\n20984\n20985\n20986\n20987\n20988\n20989\n20990\n20991\n20992\n20993\n20994\n20995\n20996\n20997\n20998\n20999\n21000\n21001\n21002\n21003\n21004\n21005\n21006\n21007\n21008\n21009\n21010\n21011\n21012\n21013\n21014\n21015\n21016\n21017\n21018\n21019\n21020\n21021\n21022\n21023\n21024\n21025\n21026\n21027\n21028\n21029\n21030\n21031\n21032\n21033\n21034\n21035\n21036\n21037\n21038\n21039\n21040\n21041\n21042\n21043\n21044\n21045\n21046\n21047\n21048\n21049\n21050\n21051\n21052\n21053\n21054\n21055\n21056\n21057\n21058\n21059\n21060\n21061\n21062\n21063\n21064\n21065\n21066\n21067\n21068\n21069\n21070\n21071\n21072\n21073\n21074\n21075\n21076\n21077\n21078\n21079\n21080\n21081\n21082\n21083\n21084\n21085\n21086\n21087\n21088\n21089\n21090\n21091\n21092\n21093\n21094\n21095\n21096\n21097\n21098\n21099\n21100\n21101\n21102\n21103\n21104\n21105\n21106\n21107\n21108\n21109\n21110\n21111\n21112\n21113\n21114\n21115\n21116\n21117\n21118\n21119\n21120\n21121\n21122\n21123\n21124\n21125\n21126\n21127\n21128\n21129\n21130\n21131\n21132\n21133\n21134\n21135\n21136\n21137\n21138\n21139\n21140\n21141\n21142\n21143\n21144\n21145\n21146\n21147\n21148\n21149\n21150\n21151\n21152\n21153\n21154\n21155\n21156\n21157\n21158\n21159\n21160\n21161\n21162\n21163\n21164\n21165\n21166\n21167\n21168\n21169\n21170\n21171\n21172\n21173\n21174\n21175\n21176\n21177\n21178\n21179\n21180\n21181\n21182\n21183\n21184\n21185\n21186\n21187\n21188\n21189\n21190\n21191\n21192\n21193\n21194\n21195\n21196\n21197\n21198\n21199\n21200\n21201\n21202\n21203\n21204\n21205\n21206\n21207\n21208\n21209\n21210\n21211\n21212\n21213\n21214\n21215\n21216\n21217\n21218\n21219\n21220\n21221\n21222\n21223\n21224\n21225\n21226\n21227\n21228\n21229\n21230\n21231\n21232\n21233\n21234\n21235\n21236\n21237\n21238\n21239\n21240\n21241\n21242\n21243\n21244\n21245\n21246\n21247\n21248\n21249\n21250\n21251\n21252\n21253\n21254\n21255\n21256\n21257\n21258\n21259\n21260\n21261\n21262\n21263\n21264\n21265\n21266\n21267\n21268\n21269\n21270\n21271\n21272\n21273\n21274\n21275\n21276\n21277\n21278\n21279\n21280\n21281\n21282\n21283\n21284\n21285\n21286\n21287\n21288\n21289\n21290\n21291\n21292\n21293\n21294\n21295\n21296\n21297\n21298\n21299\n21300\n21301\n21302\n21303\n21304\n21305\n21306\n21307\n21308\n21309\n21310\n21311\n21312\n21313\n21314\n21315\n21316\n21317\n21318\n21319\n21320\n21321\n21322\n21323\n21324\n21325\n21326\n21327\n21328\n21329\n21330\n21331\n21332\n21333\n21334\n21335\n21336\n21337\n21338\n21339\n21340\n21341\n21342\n21343\n21344\n21345\n21346\n21347\n21348\n21349\n21350\n21351\n21352\n21353\n21354\n21355\n21356\n21357\n21358\n21359\n21360\n21361\n21362\n21363\n21364\n21365\n21366\n21367\n21368\n21369\n21370\n21371\n21372\n21373\n21374\n21375\n21376\n21377\n21378\n21379\n21380\n21381\n21382\n21383\n21384\n21385\n21386\n21387\n21388\n21389\n21390\n21391\n21392\n21
393\n21394\n21395\n21396\n21397\n21398\n21399\n21400\n21401\n21402\n21403\n21404\n21405\n21406\n21407\n21408\n21409\n21410\n21411\n21412\n21413\n21414\n21415\n21416\n21417\n21418\n21419\n21420\n21421\n21422\n21423\n21424\n21425\n21426\n21427\n21428\n21429\n21430\n21431\n21432\n21433\n21434\n21435\n21436\n21437\n21438\n21439\n21440\n21441\n21442\n21443\n21444\n21445\n21446\n21447\n21448\n21449\n21450\n21451\n21452\n21453\n21454\n21455\n21456\n21457\n21458\n21459\n21460\n21461\n21462\n21463\n21464\n21465\n21466\n21467\n21468\n21469\n21470\n21471\n21472\n21473\n21474\n21475\n21476\n21477\n21478\n21479\n21480\n21481\n21482\n21483\n21484\n21485\n21486\n21487\n21488\n21489\n21490\n21491\n21492\n21493\n21494\n21495\n21496\n21497\n21498\n21499\n21500\n21501\n21502\n21503\n21504\n21505\n21506\n21507\n21508\n21509\n21510\n21511\n21512\n21513\n21514\n21515\n21516\n21517\n21518\n21519\n21520\n21521\n21522\n21523\n21524\n21525\n21526\n21527\n21528\n21529\n21530\n21531\n21532\n21533\n21534\n21535\n21536\n21537\n21538\n21539\n21540\n21541\n21542\n21543\n21544\n21545\n21546\n21547\n21548\n21549\n21550\n21551\n21552\n21553\n21554\n21555\n21556\n21557\n21558\n21559\n21560\n21561\n21562\n21563\n21564\n21565\n21566\n21567\n21568\n21569\n21570\n21571\n21572\n21573\n21574\n21575\n21576\n21577\n21578\n21579\n21580\n21581\n21582\n21583\n21584\n21585\n21586\n21587\n21588\n21589\n21590\n21591\n21592\n21593\n21594\n21595\n21596\n21597\n21598\n21599\n21600\n21601\n21602\n21603\n21604\n21605\n21606\n21607\n21608\n21609\n21610\n21611\n21612\n21613\n21614\n21615\n21616\n21617\n21618\n21619\n21620\n21621\n21622\n21623\n21624\n21625\n21626\n21627\n21628\n21629\n21630\n21631\n21632\n21633\n21634\n21635\n21636\n21637\n21638\n21639\n21640\n21641\n21642\n21643\n21644\n21645\n21646\n21647\n21648\n21649\n21650\n21651\n21652\n21653\n21654\n21655\n21656\n21657\n21658\n21659\n21660\n21661\n21662\n21663\n21664\n21665\n21666\n21667\n21668\n21669\n21670\n21671\n21672\n21673\n21674\n21675\n21676\n21677\n21678\n21679\n21680\n21681\n21682\n21683\n21684\n21685\n21686\n21687\n21688\n21689\n21690\n21691\n21692\n21693\n21694\n21695\n21696\n21697\n21698\n21699\n21700\n21701\n21702\n21703\n21704\n21705\n21706\n21707\n21708\n21709\n21710\n21711\n21712\n21713\n21714\n21715\n21716\n21717\n21718\n21719\n21720\n21721\n21722\n21723\n21724\n21725\n21726\n21727\n21728\n21729\n21730\n21731\n21732\n21733\n21734\n21735\n21736\n21737\n21738\n21739\n21740\n21741\n21742\n21743\n21744\n21745\n21746\n21747\n21748\n21749\n21750\n21751\n21752\n21753\n21754\n21755\n21756\n21757\n21758\n21759\n21760\n21761\n21762\n21763\n21764\n21765\n21766\n21767\n21768\n21769\n21770\n21771\n21772\n21773\n21774\n21775\n21776\n21777\n21778\n21779\n21780\n21781\n21782\n21783\n21784\n21785\n21786\n21787\n21788\n21789\n21790\n21791\n21792\n21793\n21794\n21795\n21796\n21797\n21798\n21799\n21800\n21801\n21802\n21803\n21804\n21805\n21806\n21807\n21808\n21809\n21810\n21811\n21812\n21813\n21814\n21815\n21816\n21817\n21818\n21819\n21820\n21821\n21822\n21823\n21824\n21825\n21826\n21827\n21828\n21829\n21830\n21831\n21832\n21833\n21834\n21835\n21836\n21837\n21838\n21839\n21840\n21841\n21842\n21843\n21844\n21845\n21846\n21847\n21848\n21849\n21850\n21851\n21852\n21853\n21854\n21855\n21856\n21857\n21858\n21859\n21860\n21861\n21862\n21863\n21864\n21865\n21866\n21867\n21868\n21869\n21870\n21871\n21872\n21873\n21874\n21875\n21876\n21877\n21878\n21879\n21880\n21881\n21882\n21883\n21884\n21885\n21886\n21887\n21888\n21889\n21890\n21891\n21892\n21893\n21894\n21895\n21896\n21897\n21898\n21899\n21900\n2
1901\n21902\n21903\n21904\n21905\n21906\n21907\n21908\n21909\n21910\n21911\n21912\n21913\n21914\n21915\n21916\n21917\n21918\n21919\n21920\n21921\n21922\n21923\n21924\n21925\n21926\n21927\n21928\n21929\n21930\n21931\n21932\n21933\n21934\n21935\n21936\n21937\n21938\n21939\n21940\n21941\n21942\n21943\n21944\n21945\n21946\n21947\n21948\n21949\n21950\n21951\n21952\n21953\n21954\n21955\n21956\n21957\n21958\n21959\n21960\n21961\n21962\n21963\n21964\n21965\n21966\n21967\n21968\n21969\n21970\n21971\n21972\n21973\n21974\n21975\n21976\n21977\n21978\n21979\n21980\n21981\n21982\n21983\n21984\n21985\n21986\n21987\n21988\n21989\n21990\n21991\n21992\n21993\n21994\n21995\n21996\n21997\n21998\n21999\n22000\n22001\n22002\n22003\n22004\n22005\n22006\n22007\n22008\n22009\n22010\n22011\n22012\n22013\n22014\n22015\n22016\n22017\n22018\n22019\n22020\n22021\n22022\n22023\n22024\n22025\n22026\n22027\n22028\n22029\n22030\n22031\n22032\n22033\n22034\n22035\n22036\n22037\n22038\n22039\n22040\n22041\n22042\n22043\n22044\n22045\n22046\n22047\n22048\n22049\n22050\n22051\n22052\n22053\n22054\n22055\n22056\n22057\n22058\n22059\n22060\n22061\n22062\n22063\n22064\n22065\n22066\n22067\n22068\n22069\n22070\n22071\n22072\n22073\n22074\n22075\n22076\n22077\n22078\n22079\n22080\n22081\n22082\n22083\n22084\n22085\n22086\n22087\n22088\n22089\n22090\n22091\n22092\n22093\n22094\n22095\n22096\n22097\n22098\n22099\n22100\n22101\n22102\n22103\n22104\n22105\n22106\n22107\n22108\n22109\n22110\n22111\n22112\n22113\n22114\n22115\n22116\n22117\n22118\n22119\n22120\n22121\n22122\n22123\n22124\n22125\n22126\n22127\n22128\n22129\n22130\n22131\n22132\n22133\n22134\n22135\n22136\n22137\n22138\n22139\n22140\n22141\n22142\n22143\n22144\n22145\n22146\n22147\n22148\n22149\n22150\n22151\n22152\n22153\n22154\n22155\n22156\n22157\n22158\n22159\n22160\n22161\n22162\n22163\n22164\n22165\n22166\n22167\n22168\n22169\n22170\n22171\n22172\n22173\n22174\n22175\n22176\n22177\n22178\n22179\n22180\n22181\n22182\n22183\n22184\n22185\n22186\n22187\n22188\n22189\n22190\n22191\n22192\n22193\n22194\n22195\n22196\n22197\n22198\n22199\n22200\n22201\n22202\n22203\n22204\n22205\n22206\n22207\n22208\n22209\n22210\n22211\n22212\n22213\n22214\n22215\n22216\n22217\n22218\n22219\n22220\n22221\n22222\n22223\n22224\n22225\n22226\n22227\n22228\n22229\n22230\n22231\n22232\n22233\n22234\n22235\n22236\n22237\n22238\n22239\n22240\n22241\n22242\n22243\n22244\n22245\n22246\n22247\n22248\n22249\n22250\n22251\n22252\n22253\n22254\n22255\n22256\n22257\n22258\n22259\n22260\n22261\n22262\n22263\n22264\n22265\n22266\n22267\n22268\n22269\n22270\n22271\n22272\n22273\n22274\n22275\n22276\n22277\n22278\n22279\n22280\n22281\n22282\n22283\n22284\n22285\n22286\n22287\n22288\n22289\n22290\n22291\n22292\n22293\n22294\n22295\n22296\n22297\n22298\n22299\n22300\n22301\n22302\n22303\n22304\n22305\n22306\n22307\n22308\n22309\n22310\n22311\n22312\n22313\n22314\n22315\n22316\n22317\n22318\n22319\n22320\n22321\n22322\n22323\n22324\n22325\n22326\n22327\n22328\n22329\n22330\n22331\n22332\n22333\n22334\n22335\n22336\n22337\n22338\n22339\n22340\n22341\n22342\n22343\n22344\n22345\n22346\n22347\n22348\n22349\n22350\n22351\n22352\n22353\n22354\n22355\n22356\n22357\n22358\n22359\n22360\n22361\n22362\n22363\n22364\n22365\n22366\n22367\n22368\n22369\n22370\n22371\n22372\n22373\n22374\n22375\n22376\n22377\n22378\n22379\n22380\n22381\n22382\n22383\n22384\n22385\n22386\n22387\n22388\n22389\n22390\n22391\n22392\n22393\n22394\n22395\n22396\n22397\n22398\n22399\n22400\n22401\n22402\n22403\n22404\n22405\n22406\n22407\n22408\n
22409\n22410\n22411\n22412\n22413\n22414\n22415\n22416\n22417\n22418\n22419\n22420\n22421\n22422\n22423\n22424\n22425\n22426\n22427\n22428\n22429\n22430\n22431\n22432\n22433\n22434\n22435\n22436\n22437\n22438\n22439\n22440\n22441\n22442\n22443\n22444\n22445\n22446\n22447\n22448\n22449\n22450\n22451\n22452\n22453\n22454\n22455\n22456\n22457\n22458\n22459\n22460\n22461\n22462\n22463\n22464\n22465\n22466\n22467\n22468\n22469\n22470\n22471\n22472\n22473\n22474\n22475\n22476\n22477\n22478\n22479\n22480\n22481\n22482\n22483\n22484\n22485\n22486\n22487\n22488\n22489\n22490\n22491\n22492\n22493\n22494\n22495\n22496\n22497\n22498\n22499\n22500\n22501\n22502\n22503\n22504\n22505\n22506\n22507\n22508\n22509\n22510\n22511\n22512\n22513\n22514\n22515\n22516\n22517\n22518\n22519\n22520\n22521\n22522\n22523\n22524\n22525\n22526\n22527\n22528\n22529\n22530\n22531\n22532\n22533\n22534\n22535\n22536\n22537\n22538\n22539\n22540\n22541\n22542\n22543\n22544\n22545\n22546\n22547\n22548\n22549\n22550\n22551\n22552\n22553\n22554\n22555\n22556\n22557\n22558\n22559\n22560\n22561\n22562\n22563\n22564\n22565\n22566\n22567\n22568\n22569\n22570\n22571\n22572\n22573\n22574\n22575\n22576\n22577\n22578\n22579\n22580\n22581\n22582\n22583\n22584\n22585\n22586\n22587\n22588\n22589\n22590\n22591\n22592\n22593\n22594\n22595\n22596\n22597\n22598\n22599\n22600\n22601\n22602\n22603\n22604\n22605\n22606\n22607\n22608\n22609\n22610\n22611\n22612\n22613\n22614\n22615\n22616\n22617\n22618\n22619\n22620\n22621\n22622\n22623\n22624\n22625\n22626\n22627\n22628\n22629\n22630\n22631\n22632\n22633\n22634\n22635\n22636\n22637\n22638\n22639\n22640\n22641\n22642\n22643\n22644\n22645\n22646\n22647\n22648\n22649\n22650\n22651\n22652\n22653\n22654\n22655\n22656\n22657\n22658\n22659\n22660\n22661\n22662\n22663\n22664\n22665\n22666\n22667\n22668\n22669\n22670\n22671\n22672\n22673\n22674\n22675\n22676\n22677\n22678\n22679\n22680\n22681\n22682\n22683\n22684\n22685\n22686\n22687\n22688\n22689\n22690\n22691\n22692\n22693\n22694\n22695\n22696\n22697\n22698\n22699\n22700\n22701\n22702\n22703\n22704\n22705\n22706\n22707\n22708\n22709\n22710\n22711\n22712\n22713\n22714\n22715\n22716\n22717\n22718\n22719\n22720\n22721\n22722\n22723\n22724\n22725\n22726\n22727\n22728\n22729\n22730\n22731\n22732\n22733\n22734\n22735\n22736\n22737\n22738\n22739\n22740\n22741\n22742\n22743\n22744\n22745\n22746\n22747\n22748\n22749\n22750\n22751\n22752\n22753\n22754\n22755\n22756\n22757\n22758\n22759\n22760\n22761\n22762\n22763\n22764\n22765\n22766\n22767\n22768\n22769\n22770\n22771\n22772\n22773\n22774\n22775\n22776\n22777\n22778\n22779\n22780\n22781\n22782\n22783\n22784\n22785\n22786\n22787\n22788\n22789\n22790\n22791\n22792\n22793\n22794\n22795\n22796\n22797\n22798\n22799\n22800\n22801\n22802\n22803\n22804\n22805\n22806\n22807\n22808\n22809\n22810\n22811\n22812\n22813\n22814\n22815\n22816\n22817\n22818\n22819\n22820\n22821\n22822\n22823\n22824\n22825\n22826\n22827\n22828\n22829\n22830\n22831\n22832\n22833\n22834\n22835\n22836\n22837\n22838\n22839\n22840\n22841\n22842\n22843\n22844\n22845\n22846\n22847\n22848\n22849\n22850\n22851\n22852\n22853\n22854\n22855\n22856\n22857\n22858\n22859\n22860\n22861\n22862\n22863\n22864\n22865\n22866\n22867\n22868\n22869\n22870\n22871\n22872\n22873\n22874\n22875\n22876\n22877\n22878\n22879\n22880\n22881\n22882\n22883\n22884\n22885\n22886\n22887\n22888\n22889\n22890\n22891\n22892\n22893\n22894\n22895\n22896\n22897\n22898\n22899\n22900\n22901\n22902\n22903\n22904\n22905\n22906\n22907\n22908\n22909\n22910\n22911\n22912\n22913\n22914\n22915\n22916\
n22917\n[... consecutive integers 22918 through 29998, one per line, elided ...]\n29999' \ No newline at end of file diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff 
b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff new file mode 100644 index 0000000000000000000000000000000000000000..30f0dbf91b078ef670868d5e7321f956a6a7a506 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff @@ -0,0 +1,15 @@ +@RELATION test2 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 real +@ATTRIBUTE attr2 integer +@ATTRIBUTE attr3 Integer +@ATTRIBUTE attr4 Numeric +@ATTRIBUTE attr5 numeric +@ATTRIBUTE attr6 string +@ATTRIBUTE attr7 STRING +@ATTRIBUTE attr8 {bla} +@ATTRIBUTE attr9 {bla, bla} + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat new file mode 100644 index 0000000000000000000000000000000000000000..293f387719e8bdcacb075e0de5737894e5dafed3 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat new file mode 100644 index 0000000000000000000000000000000000000000..30c8c8ad5378be4508bd785da8b7cef38adbd13e Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..fc893f331c985cf17b7ce9b7b8c179eaf2103659 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..b3b086cc31dce2de1e300a1d018b0bf5661b69f3 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..316f8894c5ecc88468cfa0908c277f730e3163e8 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..0c4729c56b6ab1e8945249a4d3144c79d8538e9e Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat new file mode 100644 index 
0000000000000000000000000000000000000000..f68323b0c8eb7fc999dead349ea3bd3a6da66bd4 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsimplecell.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsimplecell.mat new file mode 100644 index 0000000000000000000000000000000000000000..2a98f48917f8f275e541eeac5ef1fe741c40bb0b Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsimplecell.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..81c536d0b067b92cae1b7a2ee71824e2c5e730d9 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..028841f9d3aae42d6cf782db14634cbe375f0a05 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1eaa9243ff83caa8e8078adc0cfb268119a4c72 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/__init__.py @@ -0,0 +1,327 @@ +""" +======================================= +Signal processing (:mod:`scipy.signal`) +======================================= + +Convolution +=========== + +.. autosummary:: + :toctree: generated/ + + convolve -- N-D convolution. + correlate -- N-D correlation. + fftconvolve -- N-D convolution using the FFT. + oaconvolve -- N-D convolution using the overlap-add method. + convolve2d -- 2-D convolution (more options). + correlate2d -- 2-D correlation (more options). + sepfir2d -- Convolve with a 2-D separable FIR filter. + choose_conv_method -- Chooses faster of FFT and direct convolution methods. + correlation_lags -- Determines lag indices for 1D cross-correlation. + +B-splines +========= + +.. autosummary:: + :toctree: generated/ + + gauss_spline -- Gaussian approximation to the B-spline basis function. + cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline. + qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline. + cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline. + qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline. + cspline1d_eval -- Evaluate a cubic spline at the given points. + qspline1d_eval -- Evaluate a quadratic spline at the given points. + spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array. + +Filtering +========= + +.. autosummary:: + :toctree: generated/ + + order_filter -- N-D order filter. + medfilt -- N-D median filter. + medfilt2d -- 2-D median filter (faster). + wiener -- N-D Wiener filter. + + symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems). 
+ symiirorder2 -- 4th-order IIR filter (cascade of second-order systems). + lfilter -- 1-D FIR and IIR digital linear filtering. + lfiltic -- Construct initial conditions for `lfilter`. + lfilter_zi -- Compute an initial state zi for the lfilter function that + -- corresponds to the steady state of the step response. + filtfilt -- A forward-backward filter. + savgol_filter -- Filter a signal using the Savitzky-Golay filter. + + deconvolve -- 1-D deconvolution using lfilter. + + sosfilt -- 1-D IIR digital linear filtering using + -- a second-order sections filter representation. + sosfilt_zi -- Compute an initial state zi for the sosfilt function that + -- corresponds to the steady state of the step response. + sosfiltfilt -- A forward-backward filter for second-order sections. + hilbert -- Compute 1-D analytic signal, using the Hilbert transform. + hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform. + envelope -- Compute the envelope of a real- or complex-valued signal. + + decimate -- Downsample a signal. + detrend -- Remove linear and/or constant trends from data. + resample -- Resample using Fourier method. + resample_poly -- Resample using polyphase filtering method. + upfirdn -- Upsample, apply FIR filter, downsample. + +Filter design +============= + +.. autosummary:: + :toctree: generated/ + + bilinear -- Digital filter from an analog filter using + -- the bilinear transform. + bilinear_zpk -- Digital filter from an analog filter using + -- the bilinear transform. + findfreqs -- Find array of frequencies for computing filter response. + firls -- FIR filter design using least-squares error minimization. + firwin -- Windowed FIR filter design, with frequency response + -- defined as pass and stop bands. + firwin2 -- Windowed FIR filter design, with arbitrary frequency + -- response. + freqs -- Analog filter frequency response from TF coefficients. + freqs_zpk -- Analog filter frequency response from ZPK coefficients. + freqz -- Digital filter frequency response from TF coefficients. + freqz_sos -- Digital filter frequency response for SOS format filter. + freqz_zpk -- Digital filter frequency response from ZPK coefficients. + gammatone -- FIR and IIR gammatone filter design. + group_delay -- Digital filter group delay. + iirdesign -- IIR filter design given bands and gains. + iirfilter -- IIR filter design given order and critical frequencies. + kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given + -- the number of taps and the transition width at + -- discontinuities in the frequency response. + kaiser_beta -- Compute the Kaiser parameter beta, given the desired + -- FIR filter attenuation. + kaiserord -- Design a Kaiser window to limit ripple and width of + -- transition region. + minimum_phase -- Convert a linear phase FIR filter to minimum phase. + savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay + -- filter. + remez -- Optimal FIR filter design. + + unique_roots -- Unique roots and their multiplicities. + residue -- Partial fraction expansion of b(s) / a(s). + residuez -- Partial fraction expansion of b(z) / a(z). + invres -- Inverse partial fraction expansion for analog filter. + invresz -- Inverse partial fraction expansion for digital filter. + BadCoefficients -- Warning on badly conditioned filter coefficients. + +Lower-level filter design functions: + +.. autosummary:: + :toctree: generated/ + + abcd_normalize -- Check state-space matrices and ensure they are rank-2. 
+ band_stop_obj -- Band Stop Objective Function for order minimization. + besselap -- Return (z,p,k) for analog prototype of Bessel filter. + buttap -- Return (z,p,k) for analog prototype of Butterworth filter. + cheb1ap -- Return (z,p,k) for type I Chebyshev filter. + cheb2ap -- Return (z,p,k) for type II Chebyshev filter. + ellipap -- Return (z,p,k) for analog prototype of elliptic filter. + lp2bp -- Transform a lowpass filter prototype to a bandpass filter. + lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter. + lp2bs -- Transform a lowpass filter prototype to a bandstop filter. + lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter. + lp2hp -- Transform a lowpass filter prototype to a highpass filter. + lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter. + lp2lp -- Transform a lowpass filter prototype to a lowpass filter. + lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter. + normalize -- Normalize polynomial representation of a transfer function. + + + +Matlab-style IIR filter design +============================== + +.. autosummary:: + :toctree: generated/ + + butter -- Butterworth + buttord + cheby1 -- Chebyshev Type I + cheb1ord + cheby2 -- Chebyshev Type II + cheb2ord + ellip -- Elliptic (Cauer) + ellipord + bessel -- Bessel (no order selection available -- try buttord) + iirnotch -- Design second-order IIR notch digital filter. + iirpeak -- Design second-order IIR peak (resonant) digital filter. + iircomb -- Design IIR comb filter. + +Continuous-time linear systems +============================== + +.. autosummary:: + :toctree: generated/ + + lti -- Continuous-time linear time invariant system base class. + StateSpace -- Linear time invariant system in state space form. + TransferFunction -- Linear time invariant system in transfer function form. + ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form. + lsim -- Continuous-time simulation of output to linear system. + impulse -- Impulse response of linear, time-invariant (LTI) system. + step -- Step response of continuous-time LTI system. + freqresp -- Frequency response of a continuous-time LTI system. + bode -- Bode magnitude and phase data (continuous-time LTI). + +Discrete-time linear systems +============================ + +.. autosummary:: + :toctree: generated/ + + dlti -- Discrete-time linear time invariant system base class. + StateSpace -- Linear time invariant system in state space form. + TransferFunction -- Linear time invariant system in transfer function form. + ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form. + dlsim -- Simulation of output to a discrete-time linear system. + dimpulse -- Impulse response of a discrete-time LTI system. + dstep -- Step response of a discrete-time LTI system. + dfreqresp -- Frequency response of a discrete-time LTI system. + dbode -- Bode magnitude and phase data (discrete-time LTI). + +LTI representations +=================== + +.. autosummary:: + :toctree: generated/ + + tf2zpk -- Transfer function to zero-pole-gain. + tf2sos -- Transfer function to second-order sections. + tf2ss -- Transfer function to state-space. + zpk2tf -- Zero-pole-gain to transfer function. + zpk2sos -- Zero-pole-gain to second-order sections. + zpk2ss -- Zero-pole-gain to state-space. + ss2tf -- State-space to transfer function. + ss2zpk -- State-space to zero-pole-gain. + sos2zpk -- Second-order sections to zero-pole-gain. + sos2tf -- Second-order sections to transfer function. 
+ cont2discrete -- Continuous-time to discrete-time LTI conversion. + place_poles -- Pole placement. + +Waveforms +========= + +.. autosummary:: + :toctree: generated/ + + chirp -- Frequency swept cosine signal, with several freq functions. + gausspulse -- Gaussian modulated sinusoid. + max_len_seq -- Maximum length sequence. + sawtooth -- Periodic sawtooth. + square -- Square wave. + sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial. + unit_impulse -- Discrete unit impulse. + +Window functions +================ + +For window functions, see the `scipy.signal.windows` namespace. + +In the `scipy.signal` namespace, there is a convenience function to +obtain these windows by name: + +.. autosummary:: + :toctree: generated/ + + get_window -- Return a window of a given length and type. + +Peak finding +============ + +.. autosummary:: + :toctree: generated/ + + argrelmin -- Calculate the relative minima of data. + argrelmax -- Calculate the relative maxima of data. + argrelextrema -- Calculate the relative extrema of data. + find_peaks -- Find a subset of peaks inside a signal. + find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation. + peak_prominences -- Calculate the prominence of each peak in a signal. + peak_widths -- Calculate the width of each peak in a signal. + +Spectral analysis +================= + +.. autosummary:: + :toctree: generated/ + + periodogram -- Compute a (modified) periodogram. + welch -- Compute a periodogram using Welch's method. + csd -- Compute the cross spectral density, using Welch's method. + coherence -- Compute the magnitude squared coherence, using Welch's method. + spectrogram -- Compute the spectrogram (legacy). + lombscargle -- Computes the Lomb-Scargle periodogram. + vectorstrength -- Computes the vector strength. + ShortTimeFFT -- Interface for calculating the \ + :ref:`Short Time Fourier Transform ` and \ + its inverse. + stft -- Compute the Short Time Fourier Transform (legacy). + istft -- Compute the Inverse Short Time Fourier Transform (legacy). + check_COLA -- Check the COLA constraint for iSTFT reconstruction. + check_NOLA -- Check the NOLA constraint for iSTFT reconstruction. + +Chirp Z-transform and Zoom FFT +============================================ + +.. autosummary:: + :toctree: generated/ + + czt - Chirp z-transform convenience function + zoom_fft - Zoom FFT convenience function + CZT - Chirp z-transform function generator + ZoomFFT - Zoom FFT function generator + czt_points - Output the z-plane points sampled by a chirp z-transform + +The functions are simpler to use than the classes, but are less efficient when +using the same transform on many arrays of the same length, since they +repeatedly generate the same chirp signal with every call. In these cases, +use the classes to create a reusable function instead. + +""" + +from . import _sigtools, windows +from ._waveforms import * +from ._max_len_seq import max_len_seq +from ._upfirdn import upfirdn + +from ._spline import ( + sepfir2d +) + +from ._spline_filters import * +from ._filter_design import * +from ._fir_filter_design import * +from ._ltisys import * +from ._lti_conversion import * +from ._signaltools import * +from ._savitzky_golay import savgol_coeffs, savgol_filter +from ._spectral_py import * +from ._short_time_fft import * +from ._peak_finding import * +from ._czt import * +from .windows import get_window # keep this one in signal namespace + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import ( + bsplines, filter_design, fir_filter_design, lti_conversion, ltisys, + spectral, signaltools, waveforms, wavelets, spline +) + +__all__ = [ + s for s in dir() if not s.startswith("_") +] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_arraytools.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_arraytools.py new file mode 100644 index 0000000000000000000000000000000000000000..87ce75d8d892a64021da7abc5d149556c22cf983 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_arraytools.py @@ -0,0 +1,264 @@ +""" +Functions for acting on a axis of an array. +""" +import numpy as np + + +def axis_slice(a, start=None, stop=None, step=None, axis=-1): + """Take a slice along axis 'axis' from 'a'. + + Parameters + ---------- + a : numpy.ndarray + The array to be sliced. + start, stop, step : int or None + The slice parameters. + axis : int, optional + The axis of `a` to be sliced. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import axis_slice + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> axis_slice(a, start=0, stop=1, axis=1) + array([[1], + [4], + [7]]) + >>> axis_slice(a, start=1, axis=0) + array([[4, 5, 6], + [7, 8, 9]]) + + Notes + ----- + The keyword arguments start, stop and step are used by calling + slice(start, stop, step). This implies axis_slice() does not + handle its arguments the exactly the same as indexing. To select + a single index k, for example, use + axis_slice(a, start=k, stop=k+1) + In this case, the length of the axis 'axis' in the result will + be 1; the trivial dimension is not removed. (Use numpy.squeeze() + to remove trivial axes.) + """ + a_slice = [slice(None)] * a.ndim + a_slice[axis] = slice(start, stop, step) + b = a[tuple(a_slice)] + return b + + +def axis_reverse(a, axis=-1): + """Reverse the 1-D slices of `a` along axis `axis`. + + Returns axis_slice(a, step=-1, axis=axis). + """ + return axis_slice(a, step=-1, axis=axis) + + +def odd_ext(x, n, axis=-1): + """ + Odd extension at the boundaries of an array + + Generate a new ndarray by making an odd extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import odd_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> odd_ext(a, 2) + array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], + [-4, -1, 0, 1, 4, 9, 16, 23, 28]]) + + Odd extension is a "180 degree rotation" at the endpoints of the original + array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = odd_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension') + >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. 
" + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_end = axis_slice(x, start=0, stop=1, axis=axis) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = np.concatenate((2 * left_end - left_ext, + x, + 2 * right_end - right_ext), + axis=axis) + return ext + + +def even_ext(x, n, axis=-1): + """ + Even extension at the boundaries of an array + + Generate a new ndarray by making an even extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import even_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> even_ext(a, 2) + array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3], + [ 4, 1, 0, 1, 4, 9, 16, 9, 4]]) + + Even extension is a "mirror image" at the boundaries of the original array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = even_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension') + >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. " + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = np.concatenate((left_ext, + x, + right_ext), + axis=axis) + return ext + + +def const_ext(x, n, axis=-1): + """ + Constant extension at the boundaries of an array + + Generate a new ndarray that is a constant extension of `x` along an axis. + + The extension repeats the values at the first and last element of + the axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import const_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> const_ext(a, 2) + array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5], + [ 0, 0, 0, 1, 4, 9, 16, 16, 16]]) + + Constant extension continues with the same values as the endpoints of the + array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = const_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension') + >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + left_end = axis_slice(x, start=0, stop=1, axis=axis) + ones_shape = [1] * x.ndim + ones_shape[axis] = n + ones = np.ones(ones_shape, dtype=x.dtype) + left_ext = ones * left_end + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = ones * right_end + ext = np.concatenate((left_ext, + x, + right_ext), + axis=axis) + return ext + + +def zero_ext(x, n, axis=-1): + """ + Zero padding at the boundaries of an array + + Generate a new ndarray that is a zero-padded extension of `x` along + an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the + axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._arraytools import zero_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> zero_ext(a, 2) + array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0], + [ 0, 0, 0, 1, 4, 9, 16, 0, 0]]) + """ + if n < 1: + return x + zeros_shape = list(x.shape) + zeros_shape[axis] = n + zeros = np.zeros(zeros_shape, dtype=x.dtype) + ext = np.concatenate((zeros, x, zeros), axis=axis) + return ext + + +def _validate_fs(fs, allow_none=True): + """ + Check if the given sampling frequency is a scalar and raises an exception + otherwise. If allow_none is False, also raises an exception for none + sampling rates. Returns the sampling frequency as float or none if the + input is none. + """ + if fs is None: + if not allow_none: + raise ValueError("Sampling frequency can not be none.") + else: # should be float + if not np.isscalar(fs): + raise ValueError("Sampling frequency fs must be a single scalar.") + fs = float(fs) + return fs diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_czt.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_czt.py new file mode 100644 index 0000000000000000000000000000000000000000..c5e5715b460fb2719b68d4694474bc1efc0a9fa0 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_czt.py @@ -0,0 +1,575 @@ +# This program is public domain +# Authors: Paul Kienzle, Nadav Horesh +""" +Chirp z-transform. + +We provide two interfaces to the chirp z-transform: an object interface +which precalculates part of the transform and can be applied efficiently +to many different data sets, and a functional interface which is applied +only to the given data set. + +Transforms +---------- + +CZT : callable (x, axis=-1) -> array + Define a chirp z-transform that can be applied to different signals. +ZoomFFT : callable (x, axis=-1) -> array + Define a Fourier transform on a range of frequencies. + +Functions +--------- + +czt : array + Compute the chirp z-transform for a signal. +zoom_fft : array + Compute the Fourier transform on a range of frequencies. 
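+
+A rough sketch of how the two interfaces relate (``x`` below is just an
+illustrative 1-D array; both calls produce the same values up to rounding
+error)::
+
+    import numpy as np
+    from scipy.signal import czt, CZT
+
+    x = np.arange(8.0)
+    y1 = czt(x)                # functional interface: one-off transform
+    transform = CZT(n=len(x))  # object interface: precalculates the chirps
+    y2 = transform(x)          # reuse `transform` on other length-8 signals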
+""" + +import cmath +import numbers +import numpy as np +from numpy import pi, arange +from scipy.fft import fft, ifft, next_fast_len + +__all__ = ['czt', 'zoom_fft', 'CZT', 'ZoomFFT', 'czt_points'] + + +def _validate_sizes(n, m): + if n < 1 or not isinstance(n, numbers.Integral): + raise ValueError('Invalid number of CZT data ' + f'points ({n}) specified. ' + 'n must be positive and integer type.') + + if m is None: + m = n + elif m < 1 or not isinstance(m, numbers.Integral): + raise ValueError('Invalid number of CZT output ' + f'points ({m}) specified. ' + 'm must be positive and integer type.') + + return m + + +def czt_points(m, w=None, a=1+0j): + """ + Return the points at which the chirp z-transform is computed. + + Parameters + ---------- + m : int + The number of points desired. + w : complex, optional + The ratio between points in each step. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + + Returns + ------- + out : ndarray + The points in the Z plane at which `CZT` samples the z-transform, + when called with arguments `m`, `w`, and `a`, as complex numbers. + + See Also + -------- + CZT : Class that creates a callable chirp z-transform function. + czt : Convenience function for quickly calculating CZT. + + Examples + -------- + Plot the points of a 16-point FFT: + + >>> import numpy as np + >>> from scipy.signal import czt_points + >>> points = czt_points(16) + >>> import matplotlib.pyplot as plt + >>> plt.plot(points.real, points.imag, 'o') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal') + >>> plt.show() + + and a 91-point logarithmic spiral that crosses the unit circle: + + >>> m, w, a = 91, 0.995*np.exp(-1j*np.pi*.05), 0.8*np.exp(1j*np.pi/6) + >>> points = czt_points(m, w, a) + >>> plt.plot(points.real, points.imag, 'o') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal') + >>> plt.show() + """ + m = _validate_sizes(1, m) + + k = arange(m) + + a = 1.0 * a # at least float + + if w is None: + # Nothing specified, default to FFT + return a * np.exp(2j * pi * k / m) + else: + # w specified + w = 1.0 * w # at least float + return a * w**-k + + +class CZT: + """ + Create a callable chirp z-transform function. + + Transform to compute the frequency response around a spiral. + Objects of this class are callables which can compute the + chirp z-transform on their inputs. This object precalculates the constant + chirps used in the given transform. + + Parameters + ---------- + n : int + The size of the signal. + m : int, optional + The number of output points desired. Default is `n`. + w : complex, optional + The ratio between points in each step. This must be precise or the + accumulated error will degrade the tail of the output sequence. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + + Returns + ------- + f : CZT + Callable object ``f(x, axis=-1)`` for computing the chirp z-transform + on `x`. + + See Also + -------- + czt : Convenience function for quickly calculating CZT. + ZoomFFT : Class that creates a callable partial FFT function. + + Notes + ----- + The defaults are chosen such that ``f(x)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, m)`` is equivalent to + ``fft.fft(x, m)``. 
+ + If `w` does not lie on the unit circle, then the transform will be + around a spiral with exponentially-increasing radius. Regardless, + angle will increase linearly. + + For transforms that do lie on the unit circle, accuracy is better when + using `ZoomFFT`, since any numerical error in `w` is + accumulated for long data lengths, drifting away from the unit circle. + + The chirp z-transform can be faster than an equivalent FFT with + zero padding. Try it with your own array sizes to see. + + However, the chirp z-transform is considerably less precise than the + equivalent zero-padded FFT. + + As this CZT is implemented using the Bluestein algorithm, it can compute + large prime-length Fourier transforms in O(N log N) time, rather than the + O(N**2) time required by the direct DFT calculation. (`scipy.fft` also + uses Bluestein's algorithm'.) + + (The name "chirp z-transform" comes from the use of a chirp in the + Bluestein algorithm. It does not decompose signals into chirps, like + other transforms with "chirp" in the name.) + + References + ---------- + .. [1] Leo I. Bluestein, "A linear filtering approach to the computation + of the discrete Fourier transform," Northeast Electronics Research + and Engineering Meeting Record 10, 218-219 (1968). + .. [2] Rabiner, Schafer, and Rader, "The chirp z-transform algorithm and + its application," Bell Syst. Tech. J. 48, 1249-1292 (1969). + + Examples + -------- + Compute multiple prime-length FFTs: + + >>> from scipy.signal import CZT + >>> import numpy as np + >>> a = np.random.rand(7) + >>> b = np.random.rand(7) + >>> c = np.random.rand(7) + >>> czt_7 = CZT(n=7) + >>> A = czt_7(a) + >>> B = czt_7(b) + >>> C = czt_7(c) + + Display the points at which the FFT is calculated: + + >>> czt_7.points() + array([ 1.00000000+0.j , 0.62348980+0.78183148j, + -0.22252093+0.97492791j, -0.90096887+0.43388374j, + -0.90096887-0.43388374j, -0.22252093-0.97492791j, + 0.62348980-0.78183148j]) + >>> import matplotlib.pyplot as plt + >>> plt.plot(czt_7.points().real, czt_7.points().imag, 'o') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal') + >>> plt.show() + """ + + def __init__(self, n, m=None, w=None, a=1+0j): + m = _validate_sizes(n, m) + + k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2)) + + if w is None: + # Nothing specified, default to FFT-like + w = cmath.exp(-2j*pi/m) + wk2 = np.exp(-(1j * pi * ((k**2) % (2*m))) / m) + else: + # w specified + wk2 = w**(k**2/2.) + + a = 1.0 * a # at least float + + self.w, self.a = w, a + self.m, self.n = m, n + + nfft = next_fast_len(n + m - 1) + self._Awk2 = a**-k[:n] * wk2[:n] + self._nfft = nfft + self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) + self._wk2 = wk2[:m] + self._yidx = slice(n-1, n+m-1) + + def __call__(self, x, *, axis=-1): + """ + Calculate the chirp z-transform of a signal. + + Parameters + ---------- + x : array + The signal to transform. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + An array of the same dimensions as `x`, but with the length of the + transformed axis set to `m`. 
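+
+        Examples
+        --------
+        A small illustrative sketch of the shape behaviour for a 2-D input,
+        transforming along the last axis:
+
+        >>> import numpy as np
+        >>> from scipy.signal import CZT
+        >>> x = np.arange(14.0).reshape(2, 7)
+        >>> CZT(n=7)(x, axis=-1).shape
+        (2, 7)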
+ """ + x = np.asarray(x) + if x.shape[axis] != self.n: + raise ValueError(f"CZT defined for length {self.n}, not " + f"{x.shape[axis]}") + # Calculate transpose coordinates, to allow operation on any given axis + trnsp = np.arange(x.ndim) + trnsp[[axis, -1]] = [-1, axis] + x = x.transpose(*trnsp) + y = ifft(self._Fwk2 * fft(x*self._Awk2, self._nfft)) + y = y[..., self._yidx] * self._wk2 + return y.transpose(*trnsp) + + def points(self): + """ + Return the points at which the chirp z-transform is computed. + """ + return czt_points(self.m, self.w, self.a) + + +class ZoomFFT(CZT): + """ + Create a callable zoom FFT transform function. + + This is a specialization of the chirp z-transform (`CZT`) for a set of + equally-spaced frequencies around the unit circle, used to calculate a + section of the FFT more efficiently than calculating the entire FFT and + truncating. + + Parameters + ---------- + n : int + The size of the signal. + fn : array_like + A length-2 sequence [`f1`, `f2`] giving the frequency range, or a + scalar, for which the range [0, `fn`] is assumed. + m : int, optional + The number of points to evaluate. Default is `n`. + fs : float, optional + The sampling frequency. If ``fs=10`` represented 10 kHz, for example, + then `f1` and `f2` would also be given in kHz. + The default sampling frequency is 2, so `f1` and `f2` should be + in the range [0, 1] to keep the transform below the Nyquist + frequency. + endpoint : bool, optional + If True, `f2` is the last sample. Otherwise, it is not included. + Default is False. + + Returns + ------- + f : ZoomFFT + Callable object ``f(x, axis=-1)`` for computing the zoom FFT on `x`. + + See Also + -------- + zoom_fft : Convenience function for calculating a zoom FFT. + + Notes + ----- + The defaults are chosen such that ``f(x, 2)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, 2, m)`` is equivalent to + ``fft.fft(x, m)``. + + Sampling frequency is 1/dt, the time step between samples in the + signal `x`. The unit circle corresponds to frequencies from 0 up + to the sampling frequency. The default sampling frequency of 2 + means that `f1`, `f2` values up to the Nyquist frequency are in the + range [0, 1). For `f1`, `f2` values expressed in radians, a sampling + frequency of 2*pi should be used. + + Remember that a zoom FFT can only interpolate the points of the existing + FFT. It cannot help to resolve two separate nearby frequencies. + Frequency resolution can only be increased by increasing acquisition + time. + + These functions are implemented using Bluestein's algorithm (as is + `scipy.fft`). [2]_ + + References + ---------- + .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its + applications", pg 29 (1970) + https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf + .. [2] Leo I. Bluestein, "A linear filtering approach to the computation + of the discrete Fourier transform," Northeast Electronics Research + and Engineering Meeting Record 10, 218-219 (1968). 
+ + Examples + -------- + To plot the transform results use something like the following: + + >>> import numpy as np + >>> from scipy.signal import ZoomFFT + >>> t = np.linspace(0, 1, 1021) + >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t) + >>> f1, f2 = 5, 27 + >>> transform = ZoomFFT(len(x), [f1, f2], len(x), fs=1021) + >>> X = transform(x) + >>> f = np.linspace(f1, f2, len(x)) + >>> import matplotlib.pyplot as plt + >>> plt.plot(f, 20*np.log10(np.abs(X))) + >>> plt.show() + """ + + def __init__(self, n, fn, m=None, *, fs=2, endpoint=False): + m = _validate_sizes(n, m) + + k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2)) + + if np.size(fn) == 2: + f1, f2 = fn + elif np.size(fn) == 1: + f1, f2 = 0.0, fn + else: + raise ValueError('fn must be a scalar or 2-length sequence') + + self.f1, self.f2, self.fs = f1, f2, fs + + if endpoint: + scale = ((f2 - f1) * m) / (fs * (m - 1)) + else: + scale = (f2 - f1) / fs + a = cmath.exp(2j * pi * f1/fs) + wk2 = np.exp(-(1j * pi * scale * k**2) / m) + + self.w = cmath.exp(-2j*pi/m * scale) + self.a = a + self.m, self.n = m, n + + ak = np.exp(-2j * pi * f1/fs * k[:n]) + self._Awk2 = ak * wk2[:n] + + nfft = next_fast_len(n + m - 1) + self._nfft = nfft + self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) + self._wk2 = wk2[:m] + self._yidx = slice(n-1, n+m-1) + + +def czt(x, m=None, w=None, a=1+0j, *, axis=-1): + """ + Compute the frequency response around a spiral in the Z plane. + + Parameters + ---------- + x : array + The signal to transform. + m : int, optional + The number of output points desired. Default is the length of the + input data. + w : complex, optional + The ratio between points in each step. This must be precise or the + accumulated error will degrade the tail of the output sequence. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + An array of the same dimensions as `x`, but with the length of the + transformed axis set to `m`. + + See Also + -------- + CZT : Class that creates a callable chirp z-transform function. + zoom_fft : Convenience function for partial FFT calculations. + + Notes + ----- + The defaults are chosen such that ``signal.czt(x)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.czt(x, m)`` is + equivalent to ``fft.fft(x, m)``. + + If the transform needs to be repeated, use `CZT` to construct a + specialized transform function which can be reused without + recomputing constants. + + An example application is in system identification, repeatedly evaluating + small slices of the z-transform of a system, around where a pole is + expected to exist, to refine the estimate of the pole's true location. [1]_ + + References + ---------- + .. 
[1] Steve Alan Shilling, "A study of the chirp z-transform and its + applications", pg 20 (1970) + https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf + + Examples + -------- + Generate a sinusoid: + + >>> import numpy as np + >>> f1, f2, fs = 8, 10, 200 # Hz + >>> t = np.linspace(0, 1, fs, endpoint=False) + >>> x = np.sin(2*np.pi*t*f2) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, x) + >>> plt.axis([0, 1, -1.1, 1.1]) + >>> plt.show() + + Its discrete Fourier transform has all of its energy in a single frequency + bin: + + >>> from scipy.fft import rfft, rfftfreq + >>> from scipy.signal import czt, czt_points + >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x))) + >>> plt.margins(0, 0.1) + >>> plt.show() + + However, if the sinusoid is logarithmically-decaying: + + >>> x = np.exp(-t*f1) * np.sin(2*np.pi*t*f2) + >>> plt.plot(t, x) + >>> plt.axis([0, 1, -1.1, 1.1]) + >>> plt.show() + + the DFT will have spectral leakage: + + >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x))) + >>> plt.margins(0, 0.1) + >>> plt.show() + + While the DFT always samples the z-transform around the unit circle, the + chirp z-transform allows us to sample the Z-transform along any + logarithmic spiral, such as a circle with radius smaller than unity: + + >>> M = fs // 2 # Just positive frequencies, like rfft + >>> a = np.exp(-f1/fs) # Starting point of the circle, radius < 1 + >>> w = np.exp(-1j*np.pi/M) # "Step size" of circle + >>> points = czt_points(M + 1, w, a) # M + 1 to include Nyquist + >>> plt.plot(points.real, points.imag, '.') + >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) + >>> plt.axis('equal'); plt.axis([-1.05, 1.05, -0.05, 1.05]) + >>> plt.show() + + With the correct radius, this transforms the decaying sinusoid (and others + with the same decay rate) without spectral leakage: + + >>> z_vals = czt(x, M + 1, w, a) # Include Nyquist for comparison to rfft + >>> freqs = np.angle(points)*fs/(2*np.pi) # angle = omega, radius = sigma + >>> plt.plot(freqs, abs(z_vals)) + >>> plt.margins(0, 0.1) + >>> plt.show() + """ + x = np.asarray(x) + transform = CZT(x.shape[axis], m=m, w=w, a=a) + return transform(x, axis=axis) + + +def zoom_fft(x, fn, m=None, *, fs=2, endpoint=False, axis=-1): + """ + Compute the DFT of `x` only for frequencies in range `fn`. + + Parameters + ---------- + x : array + The signal to transform. + fn : array_like + A length-2 sequence [`f1`, `f2`] giving the frequency range, or a + scalar, for which the range [0, `fn`] is assumed. + m : int, optional + The number of points to evaluate. The default is the length of `x`. + fs : float, optional + The sampling frequency. If ``fs=10`` represented 10 kHz, for example, + then `f1` and `f2` would also be given in kHz. + The default sampling frequency is 2, so `f1` and `f2` should be + in the range [0, 1] to keep the transform below the Nyquist + frequency. + endpoint : bool, optional + If True, `f2` is the last sample. Otherwise, it is not included. + Default is False. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + The transformed signal. The Fourier transform will be calculated + at the points f1, f1+df, f1+2df, ..., f2, where df=(f2-f1)/m. + + See Also + -------- + ZoomFFT : Class that creates a callable partial FFT function. 
+ + Notes + ----- + The defaults are chosen such that ``signal.zoom_fft(x, 2)`` is equivalent + to ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.zoom_fft(x, 2, m)`` + is equivalent to ``fft.fft(x, m)``. + + To graph the magnitude of the resulting transform, use:: + + plot(linspace(f1, f2, m, endpoint=False), abs(zoom_fft(x, [f1, f2], m))) + + If the transform needs to be repeated, use `ZoomFFT` to construct + a specialized transform function which can be reused without + recomputing constants. + + Examples + -------- + To plot the transform results use something like the following: + + >>> import numpy as np + >>> from scipy.signal import zoom_fft + >>> t = np.linspace(0, 1, 1021) + >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t) + >>> f1, f2 = 5, 27 + >>> X = zoom_fft(x, [f1, f2], len(x), fs=1021) + >>> f = np.linspace(f1, f2, len(x)) + >>> import matplotlib.pyplot as plt + >>> plt.plot(f, 20*np.log10(np.abs(X))) + >>> plt.show() + """ + x = np.asarray(x) + transform = ZoomFFT(x.shape[axis], fn, m=m, fs=fs, endpoint=endpoint) + return transform(x, axis=axis) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_filter_design.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..0f177247c602cd529a064cc043d8052aa6cbc811 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_filter_design.py @@ -0,0 +1,5663 @@ +"""Filter design.""" +import math +import operator +import warnings + +import numpy as np +from numpy import (atleast_1d, poly, polyval, roots, real, asarray, + resize, pi, absolute, sqrt, tan, log10, + arcsinh, sin, exp, cosh, arccosh, ceil, conjugate, + zeros, sinh, append, concatenate, prod, ones, full, array, + mintypecode) +from numpy.polynomial.polynomial import polyval as npp_polyval +from numpy.polynomial.polynomial import polyvalfromroots + +from scipy import special, optimize, fft as sp_fft +from scipy.special import comb +from scipy._lib._util import float_factorial +from scipy.signal._arraytools import _validate_fs + + +__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize', + 'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign', + 'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel', + 'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord', + 'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap', + 'BadCoefficients', 'freqs_zpk', 'freqz_zpk', + 'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay', + 'sosfreqz', 'freqz_sos', 'iirnotch', 'iirpeak', 'bilinear_zpk', + 'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk', + 'gammatone', 'iircomb'] + + +class BadCoefficients(UserWarning): + """Warning about badly conditioned filter coefficients""" + pass + + +abs = absolute + + +def _is_int_type(x): + """ + Check if input is of a scalar integer type (so ``5`` and ``array(5)`` will + pass, while ``5.0`` and ``array([5])`` will fail. + """ + if np.ndim(x) != 0: + # Older versions of NumPy did not raise for np.array([1]).__index__() + # This is safe to remove when support for those versions is dropped + return False + try: + operator.index(x) + except TypeError: + return False + else: + return True + + +def findfreqs(num, den, N, kind='ba'): + """ + Find array of frequencies for computing the response of an analog filter. 
+ + Parameters + ---------- + num, den : array_like, 1-D + The polynomial coefficients of the numerator and denominator of the + transfer function of the filter or LTI system, where the coefficients + are ordered from highest to lowest degree. Or, the roots of the + transfer function numerator and denominator (i.e., zeroes and poles). + N : int + The length of the array to be computed. + kind : str {'ba', 'zp'}, optional + Specifies whether the numerator and denominator are specified by their + polynomial coefficients ('ba'), or their roots ('zp'). + + Returns + ------- + w : (N,) ndarray + A 1-D array of frequencies, logarithmically spaced. + + Examples + -------- + Find a set of nine frequencies that span the "interesting part" of the + frequency response for the filter with the transfer function + + H(s) = s / (s^2 + 8s + 25) + + >>> from scipy import signal + >>> signal.findfreqs([1, 0], [1, 8, 25], N=9) + array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01, + 3.16227766e-01, 1.00000000e+00, 3.16227766e+00, + 1.00000000e+01, 3.16227766e+01, 1.00000000e+02]) + """ + if kind == 'ba': + ep = atleast_1d(roots(den)) + 0j + tz = atleast_1d(roots(num)) + 0j + elif kind == 'zp': + ep = atleast_1d(den) + 0j + tz = atleast_1d(num) + 0j + else: + raise ValueError("input must be one of {'ba', 'zp'}") + + if len(ep) == 0: + ep = atleast_1d(-1000) + 0j + + ez = np.r_[ep[ep.imag >= 0], tz[(np.abs(tz) < 1e5) & (tz.imag >= 0)]] + + integ = np.abs(ez) < 1e-10 + hfreq = np.round(np.log10(np.max(3 * np.abs(ez.real + integ) + + 1.5 * ez.imag)) + 0.5) + lfreq = np.round(np.log10(0.1 * np.min(np.abs((ez + integ).real) + + 2 * ez.imag)) - 0.5) + + w = np.logspace(lfreq, hfreq, N) + return w + + +def freqs(b, a, worN=200, plot=None): + """ + Compute frequency response of analog filter. + + Given the M-order numerator `b` and N-order denominator `a` of an analog + filter, compute its frequency response:: + + b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M] + H(w) = ---------------------------------------------- + a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N] + + Parameters + ---------- + b : array_like + Numerator of a linear filter. + a : array_like + Denominator of a linear filter. + worN : {None, int, array_like}, optional + If None, then compute at 200 frequencies around the interesting parts + of the response curve (determined by pole-zero locations). If a single + integer, then compute at that many frequencies. Otherwise, compute the + response at the angular frequencies (e.g., rad/s) given in `worN`. + plot : callable, optional + A callable that takes two arguments. If given, the return parameters + `w` and `h` are passed to plot. Useful for plotting the frequency + response inside `freqs`. + + Returns + ------- + w : ndarray + The angular frequencies at which `h` was computed. + h : ndarray + The frequency response. + + See Also + -------- + freqz : Compute the frequency response of a digital filter. + + Notes + ----- + Using Matplotlib's "plot" function as the callable for `plot` produces + unexpected results, this plots the real part of the complex transfer + function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``. 
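+
+    For instance, a minimal sketch of that pattern (assuming ``np`` is NumPy,
+    ``plt`` is ``matplotlib.pyplot``, and ``b``, ``a`` are the filter
+    coefficients)::
+
+        freqs(b, a, worN=np.logspace(-1, 2, 1000),
+              plot=lambda w, h: plt.semilogx(w, abs(h)))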
+ + Examples + -------- + >>> from scipy.signal import freqs, iirfilter + >>> import numpy as np + + >>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1') + + >>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000)) + + >>> import matplotlib.pyplot as plt + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude response [dB]') + >>> plt.grid(True) + >>> plt.show() + + """ + if worN is None: + # For backwards compatibility + w = findfreqs(b, a, 200) + elif _is_int_type(worN): + w = findfreqs(b, a, worN) + else: + w = atleast_1d(worN) + + s = 1j * w + h = polyval(b, s) / polyval(a, s) + if plot is not None: + plot(w, h) + + return w, h + + +def freqs_zpk(z, p, k, worN=200): + """ + Compute frequency response of analog filter. + + Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its + frequency response:: + + (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1]) + H(w) = k * ---------------------------------------- + (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1]) + + Parameters + ---------- + z : array_like + Zeroes of a linear filter + p : array_like + Poles of a linear filter + k : scalar + Gain of a linear filter + worN : {None, int, array_like}, optional + If None, then compute at 200 frequencies around the interesting parts + of the response curve (determined by pole-zero locations). If a single + integer, then compute at that many frequencies. Otherwise, compute the + response at the angular frequencies (e.g., rad/s) given in `worN`. + + Returns + ------- + w : ndarray + The angular frequencies at which `h` was computed. + h : ndarray + The frequency response. + + See Also + -------- + freqs : Compute the frequency response of an analog filter in TF form + freqz : Compute the frequency response of a digital filter in TF form + freqz_zpk : Compute the frequency response of a digital filter in ZPK form + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import freqs_zpk, iirfilter + + >>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1', + ... output='zpk') + + >>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000)) + + >>> import matplotlib.pyplot as plt + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude response [dB]') + >>> plt.grid(True) + >>> plt.show() + + """ + k = np.asarray(k) + if k.size > 1: + raise ValueError('k must be a single scalar gain') + + if worN is None: + # For backwards compatibility + w = findfreqs(z, p, 200, kind='zp') + elif _is_int_type(worN): + w = findfreqs(z, p, worN, kind='zp') + else: + w = worN + + w = atleast_1d(w) + s = 1j * w + num = polyvalfromroots(s, z) + den = polyvalfromroots(s, p) + h = k * num/den + return w, h + + +def freqz(b, a=1, worN=512, whole=False, plot=None, fs=2*pi, + include_nyquist=False): + """ + Compute the frequency response of a digital filter. + + Given the M-order numerator `b` and N-order denominator `a` of a digital + filter, compute its frequency response:: + + jw -jw -jwM + jw B(e ) b[0] + b[1]e + ... + b[M]e + H(e ) = ------ = ----------------------------------- + jw -jw -jwN + A(e ) a[0] + a[1]e + ... + a[N]e + + Parameters + ---------- + b : array_like + Numerator of a linear filter. If `b` has dimension greater than 1, + it is assumed that the coefficients are stored in the first dimension, + and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies + array must be compatible for broadcasting. 
+ a : array_like + Denominator of a linear filter. If `b` has dimension greater than 1, + it is assumed that the coefficients are stored in the first dimension, + and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies + array must be compatible for broadcasting. + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). This is a convenient alternative to:: + + np.linspace(0, fs if whole else fs/2, N, endpoint=include_nyquist) + + Using a number that is fast for FFT computations can result in + faster computations (see Notes). + + If an array_like, compute the response at the frequencies given. + These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if worN is array_like. + plot : callable + A callable that takes two arguments. If given, the return parameters + `w` and `h` are passed to plot. Useful for plotting the frequency + response inside `freqz`. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + include_nyquist : bool, optional + If `whole` is False and `worN` is an integer, setting `include_nyquist` + to True will include the last frequency (Nyquist frequency) and is + otherwise ignored. + + .. versionadded:: 1.5.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. + + See Also + -------- + freqz_zpk + freqz_sos + + Notes + ----- + Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable + for `plot` produces unexpected results, as this plots the real part of the + complex transfer function, not the magnitude. + Try ``lambda w, h: plot(w, np.abs(h))``. + + A direct computation via (R)FFT is used to compute the frequency response + when the following conditions are met: + + 1. An integer value is given for `worN`. + 2. `worN` is fast to compute via FFT (i.e., + `next_fast_len(worN) ` equals `worN`). + 3. The denominator coefficients are a single value (``a.shape[0] == 1``). + 4. `worN` is at least as long as the numerator coefficients + (``worN >= b.shape[0]``). + 5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``. + + For long FIR filters, the FFT approach can have lower error and be much + faster than the equivalent direct polynomial calculation. + + Examples + -------- + >>> from scipy import signal + >>> import numpy as np + >>> taps, f_c = 80, 1.0 # number of taps and cut-off frequency + >>> b = signal.firwin(taps, f_c, window=('kaiser', 8), fs=2*np.pi) + >>> w, h = signal.freqz(b) + + >>> import matplotlib.pyplot as plt + >>> fig, ax1 = plt.subplots(tight_layout=True) + >>> ax1.set_title(f"Frequency Response of {taps} tap FIR Filter" + + ... 
f"($f_c={f_c}$ rad/sample)") + >>> ax1.axvline(f_c, color='black', linestyle=':', linewidth=0.8) + >>> ax1.plot(w, 20 * np.log10(abs(h)), 'C0') + >>> ax1.set_ylabel("Amplitude in dB", color='C0') + >>> ax1.set(xlabel="Frequency in rad/sample", xlim=(0, np.pi)) + + >>> ax2 = ax1.twinx() + >>> phase = np.unwrap(np.angle(h)) + >>> ax2.plot(w, phase, 'C1') + >>> ax2.set_ylabel('Phase [rad]', color='C1') + >>> ax2.grid(True) + >>> ax2.axis('tight') + >>> plt.show() + + Broadcasting Examples + + Suppose we have two FIR filters whose coefficients are stored in the + rows of an array with shape (2, 25). For this demonstration, we'll + use random data: + + >>> rng = np.random.default_rng() + >>> b = rng.random((2, 25)) + + To compute the frequency response for these two filters with one call + to `freqz`, we must pass in ``b.T``, because `freqz` expects the first + axis to hold the coefficients. We must then extend the shape with a + trivial dimension of length 1 to allow broadcasting with the array + of frequencies. That is, we pass in ``b.T[..., np.newaxis]``, which has + shape (25, 2, 1): + + >>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024) + >>> w.shape + (1024,) + >>> h.shape + (2, 1024) + + Now, suppose we have two transfer functions, with the same numerator + coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators + are stored in the first dimension of the 2-D array `a`:: + + a = [ 1 1 ] + [ -0.25, -0.5 ] + + >>> b = np.array([0.5, 0.5]) + >>> a = np.array([[1, 1], [-0.25, -0.5]]) + + Only `a` is more than 1-D. To make it compatible for + broadcasting with the frequencies, we extend it with a trivial dimension + in the call to `freqz`: + + >>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024) + >>> w.shape + (1024,) + >>> h.shape + (2, 1024) + + """ + b = atleast_1d(b) + a = atleast_1d(a) + + fs = _validate_fs(fs, allow_none=False) + + if worN is None: + # For backwards compatibility + worN = 512 + + h = None + + if _is_int_type(worN): + N = operator.index(worN) + del worN + if N < 0: + raise ValueError(f'worN must be nonnegative, got {N}') + lastpoint = 2 * pi if whole else pi + # if include_nyquist is true and whole is false, w should + # include end point + w = np.linspace(0, lastpoint, N, + endpoint=include_nyquist and not whole) + n_fft = N if whole else 2 * (N - 1) if include_nyquist else 2 * N + if (a.size == 1 and (b.ndim == 1 or (b.shape[-1] == 1)) + and n_fft >= b.shape[0] + and n_fft > 0): # TODO: review threshold acc. to benchmark? + if np.isrealobj(b) and np.isrealobj(a): + fft_func = sp_fft.rfft + else: + fft_func = sp_fft.fft + h = fft_func(b, n=n_fft, axis=0)[:N] + h /= a + if fft_func is sp_fft.rfft and whole: + # exclude DC and maybe Nyquist (no need to use axis_reverse + # here because we can build reversal with the truncation) + stop = -1 if n_fft % 2 == 1 else -2 + h_flip = slice(stop, 0, -1) + h = np.concatenate((h, h[h_flip].conj())) + if b.ndim > 1: + # Last axis of h has length 1, so drop it. + h = h[..., 0] + # Move the first axis of h to the end. + h = np.moveaxis(h, 0, -1) + else: + w = atleast_1d(worN) + del worN + w = 2*pi*w/fs + + if h is None: # still need to compute using freqs w + zm1 = exp(-1j * w) + h = (npp_polyval(zm1, b, tensor=False) / + npp_polyval(zm1, a, tensor=False)) + + w = w*(fs/(2*pi)) + + if plot is not None: + plot(w, h) + + return w, h + + +def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi): + r""" + Compute the frequency response of a digital filter in ZPK form. 
+ + Given the Zeros, Poles and Gain of a digital filter, compute its frequency + response: + + :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])` + + where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are + the `poles`. + + Parameters + ---------- + z : array_like + Zeroes of a linear filter + p : array_like + Poles of a linear filter + k : scalar + Gain of a linear filter + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). + + If an array_like, compute the response at the frequencies given. + These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if w is array_like. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. + + See Also + -------- + freqs : Compute the frequency response of an analog filter in TF form + freqs_zpk : Compute the frequency response of an analog filter in ZPK form + freqz : Compute the frequency response of a digital filter in TF form + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + Design a 4th-order digital Butterworth filter with cut-off of 100 Hz in a + system with sample rate of 1000 Hz, and plot the frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> z, p, k = signal.butter(4, 100, output='zpk', fs=1000) + >>> w, h = signal.freqz_zpk(z, p, k, fs=1000) + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(1, 1, 1) + >>> ax1.set_title('Digital filter frequency response') + + >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') + >>> ax1.set_ylabel('Amplitude [dB]', color='b') + >>> ax1.set_xlabel('Frequency [Hz]') + >>> ax1.grid(True) + + >>> ax2 = ax1.twinx() + >>> phase = np.unwrap(np.angle(h)) + >>> ax2.plot(w, phase, 'g') + >>> ax2.set_ylabel('Phase [rad]', color='g') + + >>> plt.axis('tight') + >>> plt.show() + + """ + z, p = map(atleast_1d, (z, p)) + + fs = _validate_fs(fs, allow_none=False) + + if whole: + lastpoint = 2 * pi + else: + lastpoint = pi + + if worN is None: + # For backwards compatibility + w = np.linspace(0, lastpoint, 512, endpoint=False) + elif _is_int_type(worN): + w = np.linspace(0, lastpoint, worN, endpoint=False) + else: + w = atleast_1d(worN) + w = 2*pi*w/fs + + zm1 = exp(1j * w) + h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p) + + w = w*(fs/(2*pi)) + + return w, h + + +def group_delay(system, w=512, whole=False, fs=2*pi): + r"""Compute the group delay of a digital filter. + + The group delay measures by how many samples amplitude envelopes of + various spectral components of a signal are delayed by a filter. + It is formally defined as the derivative of continuous (unwrapped) phase:: + + d jw + D(w) = - -- arg H(e) + dw + + Parameters + ---------- + system : tuple of array_like (b, a) + Numerator and denominator coefficients of a filter transfer function. + w : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). 
+ + If an array_like, compute the delay at the frequencies given. These + are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if w is array_like. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which group delay was computed, in the same units + as `fs`. By default, `w` is normalized to the range [0, pi) + (radians/sample). + gd : ndarray + The group delay. + + See Also + -------- + freqz : Frequency response of a digital filter + + Notes + ----- + The similar function in MATLAB is called `grpdelay`. + + If the transfer function :math:`H(z)` has zeros or poles on the unit + circle, the group delay at corresponding frequencies is undefined. + When such a case arises the warning is raised and the group delay + is set to 0 at those frequencies. + + For the details of numerical computation of the group delay refer to [1]_ or [2]_. + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] Richard G. Lyons, "Understanding Digital Signal Processing, + 3rd edition", p. 830. + .. [2] Julius O. Smith III, "Numerical Computation of Group Delay", + in "Introduction to Digital Filters with Audio Applications", + online book, 2007, + https://ccrma.stanford.edu/~jos/fp/Numerical_Computation_Group_Delay.html + + Examples + -------- + >>> from scipy import signal + >>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1') + >>> w, gd = signal.group_delay((b, a)) + + >>> import matplotlib.pyplot as plt + >>> plt.title('Digital filter group delay') + >>> plt.plot(w, gd) + >>> plt.ylabel('Group delay [samples]') + >>> plt.xlabel('Frequency [rad/sample]') + >>> plt.show() + + """ + if w is None: + # For backwards compatibility + w = 512 + + fs = _validate_fs(fs, allow_none=False) + + if _is_int_type(w): + if whole: + w = np.linspace(0, 2 * pi, w, endpoint=False) + else: + w = np.linspace(0, pi, w, endpoint=False) + else: + w = np.atleast_1d(w) + w = 2*pi*w/fs + + b, a = map(np.atleast_1d, system) + c = np.convolve(b, conjugate(a[::-1])) + cr = c * np.arange(c.size) + z = np.exp(-1j * w) + num = np.polyval(cr[::-1], z) + den = np.polyval(c[::-1], z) + gd = np.real(num / den) - a.size + 1 + singular = ~np.isfinite(gd) + near_singular = np.absolute(den) < 10 * EPSILON + + if np.any(singular): + gd[singular] = 0 + warnings.warn( + "The group delay is singular at frequencies " + f"[{', '.join(f'{ws:.3f}' for ws in w[singular])}], setting to 0", + stacklevel=2 + ) + + elif np.any(near_singular): + warnings.warn( + "The filter's denominator is extremely small at frequencies " + f"[{', '.join(f'{ws:.3f}' for ws in w[near_singular])}], " + "around which a singularity may be present", + stacklevel=2 + ) + + w = w*(fs/(2*pi)) + + return w, gd + + +def _validate_sos(sos): + """Helper to validate a SOS input""" + sos = np.atleast_2d(sos) + if sos.ndim != 2: + raise ValueError('sos array must be 2D') + n_sections, m = sos.shape + if m != 6: + raise ValueError('sos array must be shape (n_sections, 6)') + if not (sos[:, 3] == 1).all(): + raise ValueError('sos[:, 3] should be all ones') + return sos, n_sections + + +def freqz_sos(sos, worN=512, whole=False, fs=2*pi): + r""" + Compute the frequency response of a digital filter in SOS format. 
+ + Given `sos`, an array with shape (n, 6) of second order sections of + a digital filter, compute the frequency response of the system function:: + + B0(z) B1(z) B{n-1}(z) + H(z) = ----- * ----- * ... * --------- + A0(z) A1(z) A{n-1}(z) + + for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and + denominator of the transfer function of the k-th second order section. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). Using a number that is fast for FFT computations can result + in faster computations (see Notes of `freqz`). + + If an array_like, compute the response at the frequencies given (must + be 1-D). These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. + + See Also + -------- + freqz, sosfilt + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + Design a 15th-order bandpass filter in SOS format. + + >>> from scipy import signal + >>> import numpy as np + >>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', + ... output='sos') + + Compute the frequency response at 1500 points from DC to Nyquist. + + >>> w, h = signal.freqz_sos(sos, worN=1500) + + Plot the response. + + >>> import matplotlib.pyplot as plt + >>> plt.subplot(2, 1, 1) + >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5)) + >>> plt.plot(w/np.pi, db) + >>> plt.ylim(-75, 5) + >>> plt.grid(True) + >>> plt.yticks([0, -20, -40, -60]) + >>> plt.ylabel('Gain [dB]') + >>> plt.title('Frequency Response') + >>> plt.subplot(2, 1, 2) + >>> plt.plot(w/np.pi, np.angle(h)) + >>> plt.grid(True) + >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi], + ... [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$']) + >>> plt.ylabel('Phase [rad]') + >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)') + >>> plt.show() + + If the same filter is implemented as a single transfer function, + numerical error corrupts the frequency response: + + >>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', + ... output='ba') + >>> w, h = signal.freqz(b, a, worN=1500) + >>> plt.subplot(2, 1, 1) + >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5)) + >>> plt.plot(w/np.pi, db) + >>> plt.ylim(-75, 5) + >>> plt.grid(True) + >>> plt.yticks([0, -20, -40, -60]) + >>> plt.ylabel('Gain [dB]') + >>> plt.title('Frequency Response') + >>> plt.subplot(2, 1, 2) + >>> plt.plot(w/np.pi, np.angle(h)) + >>> plt.grid(True) + >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi], + ... 
[r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$']) + >>> plt.ylabel('Phase [rad]') + >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)') + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=False) + + sos, n_sections = _validate_sos(sos) + if n_sections == 0: + raise ValueError('Cannot compute frequencies with no sections') + h = 1. + for row in sos: + w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole, fs=fs) + h *= rowh + return w, h + + +def sosfreqz(*args, **kwargs): + """ + Compute the frequency response of a digital filter in SOS format. + + .. warning:: This function is an alias, provided for backward + compatibility. New code should use the function + :func:`scipy.signal.freqz_sos`. + """ + return freqz_sos(*args, **kwargs) + + +def _cplxreal(z, tol=None): + """ + Split into complex and real parts, combining conjugate pairs. + + The 1-D input vector `z` is split up into its complex (`zc`) and real (`zr`) + elements. Every complex element must be part of a complex-conjugate pair, + which are combined into a single number (with positive imaginary part) in + the output. Two complex numbers are considered a conjugate pair if their + real and imaginary parts differ in magnitude by less than ``tol * abs(z)``. + + Parameters + ---------- + z : array_like + Vector of complex numbers to be sorted and split + tol : float, optional + Relative tolerance for testing realness and conjugate equality. + Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for + float64) + + Returns + ------- + zc : ndarray + Complex elements of `z`, with each pair represented by a single value + having positive imaginary part, sorted first by real part, and then + by magnitude of imaginary part. The pairs are averaged when combined + to reduce error. + zr : ndarray + Real elements of `z` (those having imaginary part less than + `tol` times their magnitude), sorted by value. + + Raises + ------ + ValueError + If there are any complex numbers in `z` for which a conjugate + cannot be found. + + See Also + -------- + _cplxpair + + Examples + -------- + >>> from scipy.signal._filter_design import _cplxreal + >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] + >>> zc, zr = _cplxreal(a) + >>> print(zc) + [ 1.+1.j 2.+1.j 2.+1.j 2.+2.j] + >>> print(zr) + [ 1. 3. 4.] + """ + + z = atleast_1d(z) + if z.size == 0: + return z, z + elif z.ndim != 1: + raise ValueError('_cplxreal only accepts 1-D input') + + if tol is None: + # Get tolerance from dtype of input + tol = 100 * np.finfo((1.0 * z).dtype).eps + + # Sort by real part, magnitude of imaginary part (speed up further sorting) + z = z[np.lexsort((abs(z.imag), z.real))] + + # Split reals from conjugate pairs + real_indices = abs(z.imag) <= tol * abs(z) + zr = z[real_indices].real + + if len(zr) == len(z): + # Input is entirely real + return array([]), zr + + # Split positive and negative halves of conjugates + z = z[~real_indices] + zp = z[z.imag > 0] + zn = z[z.imag < 0] + + if len(zp) != len(zn): + raise ValueError('Array contains complex value with no matching ' + 'conjugate.') + + # Find runs of (approximately) the same real part + same_real = np.diff(zp.real) <= tol * abs(zp[:-1]) + diffs = np.diff(concatenate(([0], same_real, [0]))) + run_starts = np.nonzero(diffs > 0)[0] + run_stops = np.nonzero(diffs < 0)[0] + + # Sort each run by their imaginary parts + for i in range(len(run_starts)): + start = run_starts[i] + stop = run_stops[i] + 1 + for chunk in (zp[start:stop], zn[start:stop]): + chunk[...] 
= chunk[np.lexsort([abs(chunk.imag)])] + + # Check that negatives match positives + if any(abs(zp - zn.conj()) > tol * abs(zn)): + raise ValueError('Array contains complex value with no matching ' + 'conjugate.') + + # Average out numerical inaccuracy in real vs imag parts of pairs + zc = (zp + zn.conj()) / 2 + + return zc, zr + + +def _cplxpair(z, tol=None): + """ + Sort into pairs of complex conjugates. + + Complex conjugates in `z` are sorted by increasing real part. In each + pair, the number with negative imaginary part appears first. + + If pairs have identical real parts, they are sorted by increasing + imaginary magnitude. + + Two complex numbers are considered a conjugate pair if their real and + imaginary parts differ in magnitude by less than ``tol * abs(z)``. The + pairs are forced to be exact complex conjugates by averaging the positive + and negative values. + + Purely real numbers are also sorted, but placed after the complex + conjugate pairs. A number is considered real if its imaginary part is + smaller than `tol` times the magnitude of the number. + + Parameters + ---------- + z : array_like + 1-D input array to be sorted. + tol : float, optional + Relative tolerance for testing realness and conjugate equality. + Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for + float64) + + Returns + ------- + y : ndarray + Complex conjugate pairs followed by real numbers. + + Raises + ------ + ValueError + If there are any complex numbers in `z` for which a conjugate + cannot be found. + + See Also + -------- + _cplxreal + + Examples + -------- + >>> from scipy.signal._filter_design import _cplxpair + >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] + >>> z = _cplxpair(a) + >>> print(z) + [ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j + 3.+0.j 4.+0.j] + """ + + z = atleast_1d(z) + if z.size == 0 or np.isrealobj(z): + return np.sort(z) + + if z.ndim != 1: + raise ValueError('z must be 1-D') + + zc, zr = _cplxreal(z, tol) + + # Interleave complex values and their conjugates, with negative imaginary + # parts first in each pair + zc = np.dstack((zc.conj(), zc)).flatten() + z = np.append(zc, zr) + return z + + +def tf2zpk(b, a): + r"""Return zero, pole, gain (z, p, k) representation from a numerator, + denominator representation of a linear filter. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + + Returns + ------- + z : ndarray + Zeros of the transfer function. + p : ndarray + Poles of the transfer function. + k : float + System gain. + + Notes + ----- + If some values of `b` are too close to 0, they are removed. In that case, + a BadCoefficients warning is emitted. + + The `b` and `a` arrays are interpreted as coefficients for positive, + descending powers of the transfer function variable. So the inputs + :math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]` + can represent an analog filter of the form: + + .. math:: + + H(s) = \frac + {b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M} + {a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N} + + or a discrete-time filter of the form: + + .. math:: + + H(z) = \frac + {b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M} + {a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N} + + This "positive powers" form is found more commonly in controls + engineering. 
If `M` and `N` are equal (which is true for all filters + generated by the bilinear transform), then this happens to be equivalent + to the "negative powers" discrete-time form preferred in DSP: + + .. math:: + + H(z) = \frac + {b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}} + {a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}} + + Although this is true for common filters, remember that this is not true + in the general case. If `M` and `N` are not equal, the discrete-time + transfer function coefficients must first be converted to the "positive + powers" form before finding the poles and zeros. + + Examples + -------- + Find the zeroes, poles and gain of + a filter with the transfer function + + .. math:: + + H(s) = \frac{3s^2}{s^2 + 5s + 13} + + >>> from scipy.signal import tf2zpk + >>> tf2zpk([3, 0, 0], [1, 5, 13]) + ( array([ 0. , 0. ]), + array([ -2.5+2.59807621j , -2.5-2.59807621j]), + 3.0) + """ + b, a = normalize(b, a) + b = (b + 0.0) / a[0] + a = (a + 0.0) / a[0] + k = b[0] + b /= b[0] + z = roots(b) + p = roots(a) + return z, p, k + + +def zpk2tf(z, p, k): + r""" + Return polynomial transfer function representation from zeros and poles + + Parameters + ---------- + z : array_like + Zeros of the transfer function. + p : array_like + Poles of the transfer function. + k : float + System gain. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + Examples + -------- + Find the polynomial representation of a transfer function H(s) + using its 'zpk' (Zero-Pole-Gain) representation. + + .. math:: + + H(z) = 5 \frac + { (s - 2)(s - 6) } + { (s - 1)(s - 8) } + + >>> from scipy.signal import zpk2tf + >>> z = [2, 6] + >>> p = [1, 8] + >>> k = 5 + >>> zpk2tf(z, p, k) + ( array([ 5., -40., 60.]), array([ 1., -9., 8.])) + """ + z = atleast_1d(z) + k = atleast_1d(k) + if len(z.shape) > 1: + temp = poly(z[0]) + b = np.empty((z.shape[0], z.shape[1] + 1), temp.dtype.char) + if len(k) == 1: + k = [k[0]] * z.shape[0] + for i in range(z.shape[0]): + b[i] = k[i] * poly(z[i]) + else: + b = k * poly(z) + a = atleast_1d(poly(p)) + + # Use real output if possible. Copied from np.poly, since + # we can't depend on a specific version of numpy. + if issubclass(b.dtype.type, np.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = np.asarray(z, complex) + pos_roots = np.compress(roots.imag > 0, roots) + neg_roots = np.conjugate(np.compress(roots.imag < 0, roots)) + if len(pos_roots) == len(neg_roots): + if np.all(np.sort_complex(neg_roots) == np.sort_complex(pos_roots)): + b = b.real.copy() + + if issubclass(a.dtype.type, np.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = np.asarray(p, complex) + pos_roots = np.compress(roots.imag > 0, roots) + neg_roots = np.conjugate(np.compress(roots.imag < 0, roots)) + if len(pos_roots) == len(neg_roots): + if np.all(np.sort_complex(neg_roots) == np.sort_complex(pos_roots)): + a = a.real.copy() + + return b, a + + +def tf2sos(b, a, pairing=None, *, analog=False): + r""" + Return second-order sections from transfer function representation + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional + The method to use to combine pairs of poles and zeros into sections. + See `zpk2sos` for information and restrictions on `pairing` and + `analog` arguments. 
+ analog : bool, optional + If True, system is analog, otherwise discrete. + + .. versionadded:: 1.8.0 + + Returns + ------- + sos : ndarray + Array of second-order filter coefficients, with shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + See Also + -------- + zpk2sos, sosfilt + + Notes + ----- + It is generally discouraged to convert from TF to SOS format, since doing + so usually will not improve numerical precision errors. Instead, consider + designing filters in ZPK format and converting directly to SOS. TF is + converted to SOS by first converting to ZPK format, then converting + ZPK to SOS. + + .. versionadded:: 0.16.0 + + Examples + -------- + Find the 'sos' (second-order sections) of the transfer function H(s) + using its polynomial representation. + + .. math:: + + H(s) = \frac{s^2 - 3.5s - 2}{s^4 + 3s^3 - 15s^2 - 19s + 30} + + >>> from scipy.signal import tf2sos + >>> tf2sos([1, -3.5, -2], [1, 3, -15, -19, 30], analog=True) + array([[ 0. , 0. , 1. , 1. , 2. , -15. ], + [ 1. , -3.5, -2. , 1. , 1. , -2. ]]) + """ + return zpk2sos(*tf2zpk(b, a), pairing=pairing, analog=analog) + + +def sos2tf(sos): + r""" + Return a single transfer function from a series of second-order sections + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + Notes + ----- + .. versionadded:: 0.16.0 + + Examples + -------- + Find the polynomial representation of an elliptic filter + using its 'sos' (second-order sections) format. + + >>> from scipy.signal import sos2tf + >>> from scipy import signal + >>> sos = signal.ellip(1, 0.001, 50, 0.1, output='sos') + >>> sos2tf(sos) + ( array([0.91256522, 0.91256522, 0. ]), + array([1. , 0.82513043, 0. ])) + """ + sos = np.asarray(sos) + result_type = sos.dtype + if result_type.kind in 'bui': + result_type = np.float64 + + b = np.array([1], dtype=result_type) + a = np.array([1], dtype=result_type) + n_sections = sos.shape[0] + for section in range(n_sections): + b = np.polymul(b, sos[section, :3]) + a = np.polymul(a, sos[section, 3:]) + return b, a + + +def sos2zpk(sos): + """ + Return zeros, poles, and gain of a series of second-order sections + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + z : ndarray + Zeros of the transfer function. + p : ndarray + Poles of the transfer function. + k : float + System gain. + + Notes + ----- + The number of zeros and poles returned will be ``n_sections * 2`` + even if some of these are (effectively) zero. + + .. versionadded:: 0.16.0 + """ + sos = np.asarray(sos) + n_sections = sos.shape[0] + z = np.zeros(n_sections*2, np.complex128) + p = np.zeros(n_sections*2, np.complex128) + k = 1. 
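+    # The loop below factors each biquad independently with `tf2zpk` and
+    # multiplies the per-section gains together: the zeros and poles of a
+    # cascade are simply the union of the section zeros and poles.
+    # Illustrative sketch, kept as a comment (``butter`` is only an assumed
+    # way to obtain a two-section SOS array here):
+    #
+    #     >>> from scipy import signal
+    #     >>> sos = signal.butter(4, 0.2, output='sos')  # two sections
+    #     >>> z, p, k = signal.sos2zpk(sos)
+    #     >>> z.shape, p.shape                           # two roots per section
+    #     ((4,), (4,))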
+ for section in range(n_sections): + zpk = tf2zpk(sos[section, :3], sos[section, 3:]) + z[2*section:2*section+len(zpk[0])] = zpk[0] + p[2*section:2*section+len(zpk[1])] = zpk[1] + k *= zpk[2] + return z, p, k + + +def _nearest_real_complex_idx(fro, to, which): + """Get the next closest real or complex element based on distance""" + assert which in ('real', 'complex', 'any') + order = np.argsort(np.abs(fro - to)) + if which == 'any': + return order[0] + else: + mask = np.isreal(fro[order]) + if which == 'complex': + mask = ~mask + return order[np.nonzero(mask)[0][0]] + + +def _single_zpksos(z, p, k): + """Create one second-order section from up to two zeros and poles""" + sos = np.zeros(6) + b, a = zpk2tf(z, p, k) + sos[3-len(b):3] = b + sos[6-len(a):6] = a + return sos + + +def zpk2sos(z, p, k, pairing=None, *, analog=False): + """Return second-order sections from zeros, poles, and gain of a system + + Parameters + ---------- + z : array_like + Zeros of the transfer function. + p : array_like + Poles of the transfer function. + k : float + System gain. + pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional + The method to use to combine pairs of poles and zeros into sections. + If analog is False and pairing is None, pairing is set to 'nearest'; + if analog is True, pairing must be 'minimal', and is set to that if + it is None. + analog : bool, optional + If True, system is analog, otherwise discrete. + + .. versionadded:: 1.8.0 + + Returns + ------- + sos : ndarray + Array of second-order filter coefficients, with shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + See Also + -------- + sosfilt + + Notes + ----- + The algorithm used to convert ZPK to SOS format is designed to + minimize errors due to numerical precision issues. The pairing + algorithm attempts to minimize the peak gain of each biquadratic + section. This is done by pairing poles with the nearest zeros, starting + with the poles closest to the unit circle for discrete-time systems, and + poles closest to the imaginary axis for continuous-time systems. + + ``pairing='minimal'`` outputs may not be suitable for `sosfilt`, + and ``analog=True`` outputs will never be suitable for `sosfilt`. + + *Algorithms* + + The steps in the ``pairing='nearest'``, ``pairing='keep_odd'``, + and ``pairing='minimal'`` algorithms are mostly shared. The + ``'nearest'`` algorithm attempts to minimize the peak gain, while + ``'keep_odd'`` minimizes peak gain under the constraint that + odd-order systems should retain one section as first order. + ``'minimal'`` is similar to ``'keep_odd'``, but no additional + poles or zeros are introduced + + The algorithm steps are as follows: + + As a pre-processing step for ``pairing='nearest'``, + ``pairing='keep_odd'``, add poles or zeros to the origin as + necessary to obtain the same number of poles and zeros for + pairing. If ``pairing == 'nearest'`` and there are an odd number + of poles, add an additional pole and a zero at the origin. + + The following steps are then iterated over until no more poles or + zeros remain: + + 1. Take the (next remaining) pole (complex or real) closest to the + unit circle (or imaginary axis, for ``analog=True``) to + begin a new filter section. + + 2. If the pole is real and there are no other remaining real poles [#]_, + add the closest real zero to the section and leave it as a first + order section. 
Note that after this step we are guaranteed to be
+       left with an even number of real poles, complex poles, real zeros,
+       and complex zeros for subsequent pairing iterations.
+
+    3. Else:
+
+        1. If the pole is complex and the zero is the only remaining real
+           zero, then pair the pole with the *next* closest zero
+           (guaranteed to be complex). This is necessary to ensure that
+           there will be a real zero remaining to eventually create a
+           first-order section (thus keeping the odd order).
+
+        2. Else pair the pole with the closest remaining zero (complex or
+           real).
+
+        3. Proceed to complete the second-order section by adding another
+           pole and zero to the current pole and zero in the section:
+
+            1. If the current pole and zero are both complex, add their
+               conjugates.
+
+            2. Else if the pole is complex and the zero is real, add the
+               conjugate pole and the next closest real zero.
+
+            3. Else if the pole is real and the zero is complex, add the
+               conjugate zero and the real pole closest to those zeros.
+
+            4. Else (we must have a real pole and real zero) add the next
+               real pole closest to the unit circle, and then add the real
+               zero closest to that pole.
+
+    .. [#] This conditional can only be met for specific odd-order inputs
+           with the ``pairing = 'keep_odd'`` or ``'minimal'`` methods.
+
+    .. versionadded:: 0.16.0
+
+    Examples
+    --------
+
+    Design a 6th order low-pass elliptic digital filter for a system with a
+    sampling rate of 8000 Hz that has a pass-band corner frequency of
+    1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
+    the attenuation in the stop-band should be at least 90 dB.
+
+    In the following call to `ellip`, we could use ``output='sos'``,
+    but for this example, we'll use ``output='zpk'``, and then convert
+    to SOS format with `zpk2sos`:
+
+    >>> from scipy import signal
+    >>> import numpy as np
+    >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
+
+    Now convert to SOS format.
+
+    >>> sos = signal.zpk2sos(z, p, k)
+
+    The coefficients of the numerators of the sections:
+
+    >>> sos[:, :3]
+    array([[0.0014152 , 0.00248677, 0.0014152 ],
+           [1.        , 0.72976874, 1.        ],
+           [1.        , 0.17607852, 1.        ]])
+
+    The symmetry in the coefficients occurs because all the zeros are on the
+    unit circle.
+
+    The coefficients of the denominators of the sections:
+
+    >>> sos[:, 3:]
+    array([[ 1.        , -1.32544025,  0.46989976],
+           [ 1.        , -1.26118294,  0.62625924],
+           [ 1.        , -1.2570723 ,  0.8619958 ]])
+
+    The next example shows the effect of the `pairing` option. We have a
+    system with three poles and three zeros, so the SOS array will have
+    shape (2, 6). This means there is, in effect, an extra pole and an extra
+    zero at the origin in the SOS representation.
+
+    >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
+    >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
+
+    With ``pairing='nearest'`` (the default), we obtain
+
+    >>> signal.zpk2sos(z1, p1, 1)
+    array([[ 1.  ,  1.  ,  0.5 ,  1.  , -0.75,  0.  ],
+           [ 1.  ,  1.  ,  0.  ,  1.  , -1.6 ,  0.65]])
+
+    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
+    {0, 0.75}, and the second section has the zeros {-1, 0} and poles
+    {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
+    have been assigned to different sections.
+
+    With ``pairing='keep_odd'``, we obtain:
+
+    >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
+    array([[ 1.  ,  1.  ,  0.  ,  1.  , -0.75,  0.  ],
+           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])
+
+    The extra pole and zero at the origin are in the same section.
+ The first section is, in effect, a first-order section. + + With ``pairing='minimal'``, the first-order section doesn't have + the extra pole and zero at the origin: + + >>> signal.zpk2sos(z1, p1, 1, pairing='minimal') + array([[ 0. , 1. , 1. , 0. , 1. , -0.75], + [ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]]) + + """ + # TODO in the near future: + # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259). + # 2. Make `decimate` use `sosfilt` instead of `lfilter`. + # 3. Make sosfilt automatically simplify sections to first order + # when possible. Note this might make `sosfiltfilt` a bit harder (ICs). + # 4. Further optimizations of the section ordering / pole-zero pairing. + # See the wiki for other potential issues. + + if pairing is None: + pairing = 'minimal' if analog else 'nearest' + + valid_pairings = ['nearest', 'keep_odd', 'minimal'] + if pairing not in valid_pairings: + raise ValueError(f'pairing must be one of {valid_pairings}, not {pairing}') + + if analog and pairing != 'minimal': + raise ValueError('for analog zpk2sos conversion, ' + 'pairing must be "minimal"') + + if len(z) == len(p) == 0: + if not analog: + return np.array([[k, 0., 0., 1., 0., 0.]]) + else: + return np.array([[0., 0., k, 0., 0., 1.]]) + + if pairing != 'minimal': + # ensure we have the same number of poles and zeros, and make copies + p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0)))) + z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0)))) + n_sections = (max(len(p), len(z)) + 1) // 2 + + if len(p) % 2 == 1 and pairing == 'nearest': + p = np.concatenate((p, [0.])) + z = np.concatenate((z, [0.])) + assert len(p) == len(z) + else: + if len(p) < len(z): + raise ValueError('for analog zpk2sos conversion, ' + 'must have len(p)>=len(z)') + + n_sections = (len(p) + 1) // 2 + + # Ensure we have complex conjugate pairs + # (note that _cplxreal only gives us one element of each complex pair): + z = np.concatenate(_cplxreal(z)) + p = np.concatenate(_cplxreal(p)) + if not np.isreal(k): + raise ValueError('k must be real') + k = k.real + + if not analog: + # digital: "worst" is the closest to the unit circle + def idx_worst(p): + return np.argmin(np.abs(1 - np.abs(p))) + else: + # analog: "worst" is the closest to the imaginary axis + def idx_worst(p): + return np.argmin(np.abs(np.real(p))) + + sos = np.zeros((n_sections, 6)) + + # Construct the system, reversing order so the "worst" are last + for si in range(n_sections-1, -1, -1): + # Select the next "worst" pole + p1_idx = idx_worst(p) + p1 = p[p1_idx] + p = np.delete(p, p1_idx) + + # Pair that pole with a zero + + if np.isreal(p1) and np.isreal(p).sum() == 0: + # Special case (1): last remaining real pole + if pairing != 'minimal': + z1_idx = _nearest_real_complex_idx(z, p1, 'real') + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + sos[si] = _single_zpksos([z1, 0], [p1, 0], 1) + elif len(z) > 0: + z1_idx = _nearest_real_complex_idx(z, p1, 'real') + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + sos[si] = _single_zpksos([z1], [p1], 1) + else: + sos[si] = _single_zpksos([], [p1], 1) + + elif (len(p) + 1 == len(z) + and not np.isreal(p1) + and np.isreal(p).sum() == 1 + and np.isreal(z).sum() == 1): + + # Special case (2): there's one real pole and one real zero + # left, and an equal number of poles and zeros to pair up. 
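+            # The lone remaining real zero has to be held back so that a real
+            # pole can later form the final first-order section (keeping the
+            # odd order), as described in the docstring algorithm above; hence: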
+ # We *must* pair with a complex zero + + z1_idx = _nearest_real_complex_idx(z, p1, 'complex') + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + sos[si] = _single_zpksos([z1, z1.conj()], [p1, p1.conj()], 1) + + else: + if np.isreal(p1): + prealidx = np.flatnonzero(np.isreal(p)) + p2_idx = prealidx[idx_worst(p[prealidx])] + p2 = p[p2_idx] + p = np.delete(p, p2_idx) + else: + p2 = p1.conj() + + # find closest zero + if len(z) > 0: + z1_idx = _nearest_real_complex_idx(z, p1, 'any') + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + + if not np.isreal(z1): + sos[si] = _single_zpksos([z1, z1.conj()], [p1, p2], 1) + else: + if len(z) > 0: + z2_idx = _nearest_real_complex_idx(z, p1, 'real') + z2 = z[z2_idx] + assert np.isreal(z2) + z = np.delete(z, z2_idx) + sos[si] = _single_zpksos([z1, z2], [p1, p2], 1) + else: + sos[si] = _single_zpksos([z1], [p1, p2], 1) + else: + # no more zeros + sos[si] = _single_zpksos([], [p1, p2], 1) + + assert len(p) == len(z) == 0 # we've consumed all poles and zeros + del p, z + + # put gain in first sos + sos[0][:3] *= k + return sos + + +def _align_nums(nums): + """Aligns the shapes of multiple numerators. + + Given an array of numerator coefficient arrays [[a_1, a_2,..., + a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator + arrays with zero's so that all numerators have the same length. Such + alignment is necessary for functions like 'tf2ss', which needs the + alignment when dealing with SIMO transfer functions. + + Parameters + ---------- + nums: array_like + Numerator or list of numerators. Not necessarily with same length. + + Returns + ------- + nums: array + The numerator. If `nums` input was a list of numerators then a 2-D + array with padded zeros for shorter numerators is returned. Otherwise + returns ``np.asarray(nums)``. + """ + try: + # The statement can throw a ValueError if one + # of the numerators is a single digit and another + # is array-like e.g. if nums = [5, [1, 2, 3]] + nums = asarray(nums) + + if not np.issubdtype(nums.dtype, np.number): + raise ValueError("dtype of numerator is non-numeric") + + return nums + + except ValueError: + nums = [np.atleast_1d(num) for num in nums] + max_width = max(num.size for num in nums) + + # pre-allocate + aligned_nums = np.zeros((len(nums), max_width)) + + # Create numerators with padded zeros + for index, num in enumerate(nums): + aligned_nums[index, -num.size:] = num + + return aligned_nums + + +def normalize(b, a): + """Normalize numerator/denominator of a continuous-time transfer function. + + If values of `b` are too close to 0, they are removed. In that case, a + BadCoefficients warning is emitted. + + Parameters + ---------- + b: array_like + Numerator of the transfer function. Can be a 2-D array to normalize + multiple transfer functions. + a: array_like + Denominator of the transfer function. At most 1-D. + + Returns + ------- + num: array + The numerator of the normalized transfer function. At least a 1-D + array. A 2-D array if the input `num` is a 2-D array. + den: 1-D array + The denominator of the normalized transfer function. + + Notes + ----- + Coefficients for both the numerator and denominator should be specified in + descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as + ``[1, 3, 5]``). + + Examples + -------- + >>> from scipy.signal import normalize + + Normalize the coefficients of the transfer function + ``(3*s^2 - 2*s + 5) / (2*s^2 + 3*s + 1)``: + + >>> b = [3, -2, 5] + >>> a = [2, 3, 1] + >>> normalize(b, a) + (array([ 1.5, -1. , 2.5]), array([1. 
, 1.5, 0.5])) + + A warning is generated if, for example, the first coefficient of + `b` is 0. In the following example, the result is as expected: + + >>> import warnings + >>> with warnings.catch_warnings(record=True) as w: + ... num, den = normalize([0, 3, 6], [2, -5, 4]) + + >>> num + array([1.5, 3. ]) + >>> den + array([ 1. , -2.5, 2. ]) + + >>> print(w[0].message) + Badly conditioned filter coefficients (numerator): the results may be meaningless + + """ + num, den = b, a + + den = np.atleast_1d(den) + num = np.atleast_2d(_align_nums(num)) + + if den.ndim != 1: + raise ValueError("Denominator polynomial must be rank-1 array.") + if num.ndim > 2: + raise ValueError("Numerator polynomial must be rank-1 or" + " rank-2 array.") + if np.all(den == 0): + raise ValueError("Denominator must have at least on nonzero element.") + + # Trim leading zeros in denominator, leave at least one. + den = np.trim_zeros(den, 'f') + + # Normalize transfer function + num, den = num / den[0], den / den[0] + + # Count numerator columns that are all zero + leading_zeros = 0 + for col in num.T: + if np.allclose(col, 0, atol=1e-14): + leading_zeros += 1 + else: + break + + # Trim leading zeros of numerator + if leading_zeros > 0: + warnings.warn("Badly conditioned filter coefficients (numerator): the " + "results may be meaningless", + BadCoefficients, stacklevel=2) + # Make sure at least one column remains + if leading_zeros == num.shape[1]: + leading_zeros -= 1 + num = num[:, leading_zeros:] + + # Squeeze first dimension if singular + if num.shape[0] == 1: + num = num[0, :] + + return num, den + + +def lp2lp(b, a, wo=1.0): + r""" + Transform a lowpass filter prototype to a different frequency. + + Return an analog low-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, in + transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired cutoff, as angular frequency (e.g. rad/s). + Defaults to no change. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed low-pass filter. + a : array_like + Denominator polynomial coefficients of the transformed low-pass filter. + + See Also + -------- + lp2hp, lp2bp, lp2bs, bilinear + lp2lp_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s}{\omega_0} + + Examples + -------- + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> lp = signal.lti([1.0], [1.0, 1.0]) + >>> lp2 = signal.lti(*signal.lp2lp(lp.num, lp.den, 2)) + >>> w, mag_lp, p_lp = lp.bode() + >>> w, mag_lp2, p_lp2 = lp2.bode(w) + + >>> plt.plot(w, mag_lp, label='Lowpass') + >>> plt.plot(w, mag_lp2, label='Transformed Lowpass') + >>> plt.semilogx() + >>> plt.grid(True) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.legend() + + """ + a, b = map(atleast_1d, (a, b)) + try: + wo = float(wo) + except TypeError: + wo = float(wo[0]) + d = len(a) + n = len(b) + M = max((d, n)) + pwo = pow(wo, np.arange(M - 1, -1, -1)) + start1 = max((n - d, 0)) + start2 = max((d - n, 0)) + b = b * pwo[start1] / pwo[start2:] + a = a * pwo[start1] / pwo[start1:] + return normalize(b, a) + + +def lp2hp(b, a, wo=1.0): + r""" + Transform a lowpass filter prototype to a highpass filter. 
+ + Return an analog high-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, in + transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired cutoff, as angular frequency (e.g., rad/s). + Defaults to no change. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed high-pass filter. + a : array_like + Denominator polynomial coefficients of the transformed high-pass filter. + + See Also + -------- + lp2lp, lp2bp, lp2bs, bilinear + lp2hp_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{\omega_0}{s} + + This maintains symmetry of the lowpass and highpass responses on a + logarithmic scale. + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> lp = signal.lti([1.0], [1.0, 1.0]) + >>> hp = signal.lti(*signal.lp2hp(lp.num, lp.den)) + >>> w, mag_lp, p_lp = lp.bode() + >>> w, mag_hp, p_hp = hp.bode(w) + + >>> plt.plot(w, mag_lp, label='Lowpass') + >>> plt.plot(w, mag_hp, label='Highpass') + >>> plt.semilogx() + >>> plt.grid(True) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.legend() + + """ + a, b = map(atleast_1d, (a, b)) + try: + wo = float(wo) + except TypeError: + wo = float(wo[0]) + d = len(a) + n = len(b) + if wo != 1: + pwo = pow(wo, np.arange(max((d, n)))) + else: + pwo = np.ones(max((d, n)), b.dtype.char) + if d >= n: + outa = a[::-1] * pwo + outb = resize(b, (d,)) + outb[n:] = 0.0 + outb[:n] = b[::-1] * pwo[:n] + else: + outb = b[::-1] * pwo + outa = resize(a, (n,)) + outa[d:] = 0.0 + outa[:d] = a[::-1] * pwo[:d] + + return normalize(outb, outa) + + +def lp2bp(b, a, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandpass filter. + + Return an analog band-pass filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, in transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired passband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired passband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed band-pass filter. + a : array_like + Denominator polynomial coefficients of the transformed band-pass filter. + + See Also + -------- + lp2lp, lp2hp, lp2bs, bilinear + lp2bp_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} + + This is the "wideband" transformation, producing a passband with + geometric (log frequency) symmetry about `wo`. 
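+
+    For example, applying this substitution to the first-order low-pass
+    prototype :math:`H(s) = 1 / (s + 1)` gives the second-order band-pass
+    response
+
+    .. math:: H(s) = \frac{s \cdot \mathrm{BW}}{s^2 + s \cdot \mathrm{BW} + {\omega_0}^2}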
+ + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> lp = signal.lti([1.0], [1.0, 1.0]) + >>> bp = signal.lti(*signal.lp2bp(lp.num, lp.den)) + >>> w, mag_lp, p_lp = lp.bode() + >>> w, mag_bp, p_bp = bp.bode(w) + + >>> plt.plot(w, mag_lp, label='Lowpass') + >>> plt.plot(w, mag_bp, label='Bandpass') + >>> plt.semilogx() + >>> plt.grid(True) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.legend() + """ + + a, b = map(atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = mintypecode((a, b)) + ma = max([N, D]) + Np = N + ma + Dp = D + ma + bprime = np.empty(Np + 1, artype) + aprime = np.empty(Dp + 1, artype) + wosq = wo * wo + for j in range(Np + 1): + val = 0.0 + for i in range(0, N + 1): + for k in range(0, i + 1): + if ma - i + 2 * k == j: + val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i + bprime[Np - j] = val + for j in range(Dp + 1): + val = 0.0 + for i in range(0, D + 1): + for k in range(0, i + 1): + if ma - i + 2 * k == j: + val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i + aprime[Dp - j] = val + + return normalize(bprime, aprime) + + +def lp2bs(b, a, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandstop filter. + + Return an analog band-stop filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, in transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired stopband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired stopband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed band-stop filter. + a : array_like + Denominator polynomial coefficients of the transformed band-stop filter. + + See Also + -------- + lp2lp, lp2hp, lp2bp, bilinear + lp2bs_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} + + This is the "wideband" transformation, producing a stopband with + geometric (log frequency) symmetry about `wo`. 
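+
+    For example, applying this substitution to the first-order low-pass
+    prototype :math:`H(s) = 1 / (s + 1)` gives the second-order band-stop
+    response below, whose zeros at :math:`\pm j \omega_0` create the notch:
+
+    .. math:: H(s) = \frac{s^2 + {\omega_0}^2}{s^2 + s \cdot \mathrm{BW} + {\omega_0}^2}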
+ + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> lp = signal.lti([1.0], [1.0, 1.5]) + >>> bs = signal.lti(*signal.lp2bs(lp.num, lp.den)) + >>> w, mag_lp, p_lp = lp.bode() + >>> w, mag_bs, p_bs = bs.bode(w) + >>> plt.plot(w, mag_lp, label='Lowpass') + >>> plt.plot(w, mag_bs, label='Bandstop') + >>> plt.semilogx() + >>> plt.grid(True) + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.legend() + """ + a, b = map(atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = mintypecode((a, b)) + M = max([N, D]) + Np = M + M + Dp = M + M + bprime = np.empty(Np + 1, artype) + aprime = np.empty(Dp + 1, artype) + wosq = wo * wo + for j in range(Np + 1): + val = 0.0 + for i in range(0, N + 1): + for k in range(0, M - i + 1): + if i + 2 * k == j: + val += (comb(M - i, k) * b[N - i] * + (wosq) ** (M - i - k) * bw ** i) + bprime[Np - j] = val + for j in range(Dp + 1): + val = 0.0 + for i in range(0, D + 1): + for k in range(0, M - i + 1): + if i + 2 * k == j: + val += (comb(M - i, k) * a[D - i] * + (wosq) ** (M - i - k) * bw ** i) + aprime[Dp - j] = val + + return normalize(bprime, aprime) + + +def bilinear(b, a, fs=1.0): + r""" + Return a digital IIR filter from an analog one using a bilinear transform. + + Transform a set of poles and zeros from the analog s-plane to the digital + z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for + ``s``, maintaining the shape of the frequency response. + + Parameters + ---------- + b : array_like + Numerator of the analog filter transfer function. + a : array_like + Denominator of the analog filter transfer function. + fs : float + Sample rate, as ordinary frequency (e.g., hertz). No prewarping is + done in this function. + + Returns + ------- + b : ndarray + Numerator of the transformed digital filter transfer function. + a : ndarray + Denominator of the transformed digital filter transfer function. + + See Also + -------- + lp2lp, lp2hp, lp2bp, lp2bs + bilinear_zpk + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> fs = 100 + >>> bf = 2 * np.pi * np.array([7, 13]) + >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', + ... analog=True)) + >>> filtz = signal.lti(*signal.bilinear(filts.num, filts.den, fs)) + >>> wz, hz = signal.freqz(filtz.num, filtz.den) + >>> ws, hs = signal.freqs(filts.num, filts.den, worN=fs*wz) + + >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)), + ... label=r'$|H_z(e^{j \omega})|$') + >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)), + ... 
label=r'$|H(j \omega)|$') + >>> plt.legend() + >>> plt.xlabel('Frequency [Hz]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(True) + """ + fs = _validate_fs(fs, allow_none=False) + a, b = map(atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = float + M = max([N, D]) + Np = M + Dp = M + bprime = np.empty(Np + 1, artype) + aprime = np.empty(Dp + 1, artype) + for j in range(Np + 1): + val = 0.0 + for i in range(N + 1): + for k in range(i + 1): + for l in range(M - i + 1): + if k + l == j: + val += (comb(i, k) * comb(M - i, l) * b[N - i] * + pow(2 * fs, i) * (-1) ** k) + bprime[j] = real(val) + for j in range(Dp + 1): + val = 0.0 + for i in range(D + 1): + for k in range(i + 1): + for l in range(M - i + 1): + if k + l == j: + val += (comb(i, k) * comb(M - i, l) * a[D - i] * + pow(2 * fs, i) * (-1) ** k) + aprime[j] = real(val) + + return normalize(bprime, aprime) + + +def _validate_gpass_gstop(gpass, gstop): + + if gpass <= 0.0: + raise ValueError("gpass should be larger than 0.0") + elif gstop <= 0.0: + raise ValueError("gstop should be larger than 0.0") + elif gpass > gstop: + raise ValueError("gpass should be smaller than gstop") + + +def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba', + fs=None): + """Complete IIR digital and analog filter design. + + Given passband and stopband frequencies and gains, construct an analog or + digital IIR filter of minimum order for a given basic type. Return the + output in numerator, denominator ('ba'), pole-zero ('zpk') or second order + sections ('sos') form. + + Parameters + ---------- + wp, ws : float or array like, shape (2,) + Passband and stopband edge frequencies. Possible values are scalars + (for lowpass and highpass filters) or ranges (for bandpass and bandstop + filters). + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). + Note, that for bandpass and bandstop filters passband must lie strictly + inside stopband or vice versa. Also note that the cutoff at the band edges + for IIR filters is defined as half-power, so -3dB, not half-amplitude (-6dB) + like for `scipy.signal.fiwin`. + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + ftype : str, optional + The type of IIR filter to design: + + - Butterworth : 'butter' + - Chebyshev I : 'cheby1' + - Chebyshev II : 'cheby2' + - Cauer/elliptic: 'ellip' + + output : {'ba', 'zpk', 'sos'}, optional + Filter form of the output: + + - second-order sections (recommended): 'sos' + - numerator/denominator (default) : 'ba' + - pole-zero : 'zpk' + + In general the second-order sections ('sos') form is + recommended because inferring the coefficients for the + numerator/denominator form ('ba') suffers from numerical + instabilities. For reasons of backward compatibility the default + form is the numerator/denominator form ('ba'), where the 'b' + and the 'a' in 'ba' refer to the commonly used names of the + coefficients used. 
+ + Note: Using the second-order sections form ('sos') is sometimes + associated with additional computational costs: for + data-intense use cases it is therefore recommended to also + investigate the numerator/denominator form ('ba'). + + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + butter : Filter design using order and critical points + cheby1, cheby2, ellip, bessel + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + + Notes + ----- + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import matplotlib.ticker + + >>> wp = 0.2 + >>> ws = 0.3 + >>> gpass = 1 + >>> gstop = 40 + + >>> system = signal.iirdesign(wp, ws, gpass, gstop) + >>> w, h = signal.freqz(*system) + + >>> fig, ax1 = plt.subplots() + >>> ax1.set_title('Digital filter frequency response') + >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') + >>> ax1.set_ylabel('Amplitude [dB]', color='b') + >>> ax1.set_xlabel('Frequency [rad/sample]') + >>> ax1.grid(True) + >>> ax1.set_ylim([-120, 20]) + >>> ax2 = ax1.twinx() + >>> phase = np.unwrap(np.angle(h)) + >>> ax2.plot(w, phase, 'g') + >>> ax2.set_ylabel('Phase [rad]', color='g') + >>> ax2.grid(True) + >>> ax2.axis('tight') + >>> ax2.set_ylim([-6, 1]) + >>> nticks = 8 + >>> ax1.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) + >>> ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) + + """ + try: + ordfunc = filter_dict[ftype][1] + except KeyError as e: + raise ValueError(f"Invalid IIR filter type: {ftype}") from e + except IndexError as e: + raise ValueError(f"{ftype} does not have order selection. 
" + "Use iirfilter function.") from e + + _validate_gpass_gstop(gpass, gstop) + + wp = atleast_1d(wp) + ws = atleast_1d(ws) + + fs = _validate_fs(fs, allow_none=True) + + if wp.shape[0] != ws.shape[0] or wp.shape not in [(1,), (2,)]: + raise ValueError("wp and ws must have one or two elements each, and " + f"the same shape, got {wp.shape} and {ws.shape}") + + if any(wp <= 0) or any(ws <= 0): + raise ValueError("Values for wp, ws must be greater than 0") + + if not analog: + if fs is None: + if any(wp >= 1) or any(ws >= 1): + raise ValueError("Values for wp, ws must be less than 1") + elif any(wp >= fs/2) or any(ws >= fs/2): + raise ValueError("Values for wp, ws must be less than fs/2 " + f"(fs={fs} -> fs/2={fs/2})") + + if wp.shape[0] == 2: + if not ((ws[0] < wp[0] and wp[1] < ws[1]) or + (wp[0] < ws[0] and ws[1] < wp[1])): + raise ValueError("Passband must lie strictly inside stopband " + "or vice versa") + + band_type = 2 * (len(wp) - 1) + band_type += 1 + if wp[0] >= ws[0]: + band_type += 1 + + btype = {1: 'lowpass', 2: 'highpass', + 3: 'bandstop', 4: 'bandpass'}[band_type] + + N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs) + return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, + ftype=ftype, output=output, fs=fs) + + +def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False, + ftype='butter', output='ba', fs=None): + """ + IIR digital and analog filter design given order and critical points. + + Design an Nth-order digital or analog filter and return the filter + coefficients. + + Parameters + ---------- + N : int + The order of the filter. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + + When Wn is a length-2 sequence, ``Wn[0]`` must be less than ``Wn[1]``. + rp : float, optional + For Chebyshev and elliptic filters, provides the maximum ripple + in the passband. (dB) + rs : float, optional + For Chebyshev and elliptic filters, provides the minimum attenuation + in the stop band. (dB) + btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional + The type of filter. Default is 'bandpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + ftype : str, optional + The type of IIR filter to design: + + - Butterworth : 'butter' + - Chebyshev I : 'cheby1' + - Chebyshev II : 'cheby2' + - Cauer/elliptic: 'ellip' + - Bessel/Thomson: 'bessel' + + output : {'ba', 'zpk', 'sos'}, optional + Filter form of the output: + + - second-order sections (recommended): 'sos' + - numerator/denominator (default) : 'ba' + - pole-zero : 'zpk' + + In general the second-order sections ('sos') form is + recommended because inferring the coefficients for the + numerator/denominator form ('ba') suffers from numerical + instabilities. For reasons of backward compatibility the default + form is the numerator/denominator form ('ba'), where the 'b' + and the 'a' in 'ba' refer to the commonly used names of the + coefficients used. + + Note: Using the second-order sections form ('sos') is sometimes + associated with additional computational costs: for + data-intense use cases it is therefore recommended to also + investigate the numerator/denominator form ('ba'). 
+ + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + butter : Filter design using order and critical points + cheby1, cheby2, ellip, bessel + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord, ellipord + iirdesign : General filter design using passband and stopband spec + + Notes + ----- + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to + 200 Hz and plot the frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60, + ... btype='band', analog=True, ftype='cheby2') + >>> w, h = signal.freqs(b, a, 1000) + >>> fig = plt.figure() + >>> ax = fig.add_subplot(1, 1, 1) + >>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5))) + >>> ax.set_title('Chebyshev Type II bandpass frequency response') + >>> ax.set_xlabel('Frequency [Hz]') + >>> ax.set_ylabel('Amplitude [dB]') + >>> ax.axis((10, 1000, -100, 10)) + >>> ax.grid(which='both', axis='both') + >>> plt.show() + + Create a digital filter with the same properties, in a system with + sampling rate of 2000 Hz, and plot the frequency response. (Second-order + sections implementation is required to ensure stability of a filter of + this order): + + >>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band', + ... analog=False, ftype='cheby2', fs=2000, + ... 
output='sos') + >>> w, h = signal.freqz_sos(sos, 2000, fs=2000) + >>> fig = plt.figure() + >>> ax = fig.add_subplot(1, 1, 1) + >>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5))) + >>> ax.set_title('Chebyshev Type II bandpass frequency response') + >>> ax.set_xlabel('Frequency [Hz]') + >>> ax.set_ylabel('Amplitude [dB]') + >>> ax.axis((10, 1000, -100, 10)) + >>> ax.grid(which='both', axis='both') + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + ftype, btype, output = (x.lower() for x in (ftype, btype, output)) + Wn = asarray(Wn) + if fs is not None: + if analog: + raise ValueError("fs cannot be specified for an analog filter") + Wn = Wn / (fs/2) + + if np.any(Wn <= 0): + raise ValueError("filter critical frequencies must be greater than 0") + + if Wn.size > 1 and not Wn[0] < Wn[1]: + raise ValueError("Wn[0] must be less than Wn[1]") + + try: + btype = band_dict[btype] + except KeyError as e: + raise ValueError(f"'{btype}' is an invalid bandtype for filter.") from e + + try: + typefunc = filter_dict[ftype][0] + except KeyError as e: + raise ValueError(f"'{ftype}' is not a valid basic IIR filter.") from e + + if output not in ['ba', 'zpk', 'sos']: + raise ValueError(f"'{output}' is not a valid output form.") + + if rp is not None and rp < 0: + raise ValueError("passband ripple (rp) must be positive") + + if rs is not None and rs < 0: + raise ValueError("stopband attenuation (rs) must be positive") + + # Get analog lowpass prototype + if typefunc == buttap: + z, p, k = typefunc(N) + elif typefunc == besselap: + z, p, k = typefunc(N, norm=bessel_norms[ftype]) + elif typefunc == cheb1ap: + if rp is None: + raise ValueError("passband ripple (rp) must be provided to " + "design a Chebyshev I filter.") + z, p, k = typefunc(N, rp) + elif typefunc == cheb2ap: + if rs is None: + raise ValueError("stopband attenuation (rs) must be provided to " + "design an Chebyshev II filter.") + z, p, k = typefunc(N, rs) + elif typefunc == ellipap: + if rs is None or rp is None: + raise ValueError("Both rp and rs must be provided to design an " + "elliptic filter.") + z, p, k = typefunc(N, rp, rs) + else: + raise NotImplementedError(f"'{ftype}' not implemented in iirfilter.") + + # Pre-warp frequencies for digital filter design + if not analog: + if np.any(Wn <= 0) or np.any(Wn >= 1): + if fs is not None: + raise ValueError("Digital filter critical frequencies must " + f"be 0 < Wn < fs/2 (fs={fs} -> fs/2={fs/2})") + raise ValueError("Digital filter critical frequencies " + "must be 0 < Wn < 1") + fs = 2.0 + warped = 2 * fs * tan(pi * Wn / fs) + else: + warped = Wn + + # transform to lowpass, bandpass, highpass, or bandstop + if btype in ('lowpass', 'highpass'): + if np.size(Wn) != 1: + raise ValueError('Must specify a single critical frequency Wn ' + 'for lowpass or highpass filter') + + if btype == 'lowpass': + z, p, k = lp2lp_zpk(z, p, k, wo=warped) + elif btype == 'highpass': + z, p, k = lp2hp_zpk(z, p, k, wo=warped) + elif btype in ('bandpass', 'bandstop'): + try: + bw = warped[1] - warped[0] + wo = sqrt(warped[0] * warped[1]) + except IndexError as e: + raise ValueError('Wn must specify start and stop frequencies for ' + 'bandpass or bandstop filter') from e + + if btype == 'bandpass': + z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw) + elif btype == 'bandstop': + z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw) + else: + raise NotImplementedError(f"'{btype}' not implemented in iirfilter.") + + # Find discrete equivalent if necessary + if not analog: + z, p, k = bilinear_zpk(z, p, k, fs=fs) + + 
# Transform to proper out type (pole-zero, state-space, numer-denom) + if output == 'zpk': + return z, p, k + elif output == 'ba': + return zpk2tf(z, p, k) + elif output == 'sos': + return zpk2sos(z, p, k, analog=analog) + + +def _relative_degree(z, p): + """ + Return relative degree of transfer function from zeros and poles + """ + degree = len(p) - len(z) + if degree < 0: + raise ValueError("Improper transfer function. " + "Must have at least as many poles as zeros.") + else: + return degree + + +def bilinear_zpk(z, p, k, fs): + r""" + Return a digital IIR filter from an analog one using a bilinear transform. + + Transform a set of poles and zeros from the analog s-plane to the digital + z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for + ``s``, maintaining the shape of the frequency response. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + fs : float + Sample rate, as ordinary frequency (e.g., hertz). No prewarping is + done in this function. + + Returns + ------- + z : ndarray + Zeros of the transformed digital filter transfer function. + p : ndarray + Poles of the transformed digital filter transfer function. + k : float + System gain of the transformed digital filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk + bilinear + + Notes + ----- + .. versionadded:: 1.1.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> fs = 100 + >>> bf = 2 * np.pi * np.array([7, 13]) + >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', analog=True, + ... output='zpk')) + >>> filtz = signal.lti(*signal.bilinear_zpk(filts.zeros, filts.poles, + ... filts.gain, fs)) + >>> wz, hz = signal.freqz_zpk(filtz.zeros, filtz.poles, filtz.gain) + >>> ws, hs = signal.freqs_zpk(filts.zeros, filts.poles, filts.gain, + ... worN=fs*wz) + >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)), + ... label=r'$|H_z(e^{j \omega})|$') + >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)), + ... label=r'$|H(j \omega)|$') + >>> plt.legend() + >>> plt.xlabel('Frequency [Hz]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(True) + """ + z = atleast_1d(z) + p = atleast_1d(p) + + fs = _validate_fs(fs, allow_none=False) + + degree = _relative_degree(z, p) + + fs2 = 2.0*fs + + # Bilinear transform the poles and zeros + z_z = (fs2 + z) / (fs2 - z) + p_z = (fs2 + p) / (fs2 - p) + + # Any zeros that were at infinity get moved to the Nyquist frequency + z_z = append(z_z, -ones(degree)) + + # Compensate for gain change + k_z = k * real(prod(fs2 - z) / prod(fs2 - p)) + + return z_z, p_z, k_z + + +def lp2lp_zpk(z, p, k, wo=1.0): + r""" + Transform a lowpass filter prototype to a different frequency. + + Return an analog low-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, + using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired cutoff, as angular frequency (e.g., rad/s). + Defaults to no change. + + Returns + ------- + z : ndarray + Zeros of the transformed low-pass filter transfer function. 
+ p : ndarray + Poles of the transformed low-pass filter transfer function. + k : float + System gain of the transformed low-pass filter. + + See Also + -------- + lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear + lp2lp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s}{\omega_0} + + .. versionadded:: 1.1.0 + + Examples + -------- + Use the 'zpk' (Zero-Pole-Gain) representation of a lowpass filter to + transform it to a new 'zpk' representation associated with a cutoff frequency wo. + + >>> from scipy.signal import lp2lp_zpk + >>> z = [7, 2] + >>> p = [5, 13] + >>> k = 0.8 + >>> wo = 0.4 + >>> lp2lp_zpk(z, p, k, wo) + ( array([2.8, 0.8]), array([2. , 5.2]), 0.8) + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) # Avoid int wraparound + + degree = _relative_degree(z, p) + + # Scale all points radially from origin to shift cutoff frequency + z_lp = wo * z + p_lp = wo * p + + # Each shifted pole decreases gain by wo, each shifted zero increases it. + # Cancel out the net change to keep overall gain the same + k_lp = k * wo**degree + + return z_lp, p_lp, k_lp + + +def lp2hp_zpk(z, p, k, wo=1.0): + r""" + Transform a lowpass filter prototype to a highpass filter. + + Return an analog high-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, + using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired cutoff, as angular frequency (e.g., rad/s). + Defaults to no change. + + Returns + ------- + z : ndarray + Zeros of the transformed high-pass filter transfer function. + p : ndarray + Poles of the transformed high-pass filter transfer function. + k : float + System gain of the transformed high-pass filter. + + See Also + -------- + lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear + lp2hp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{\omega_0}{s} + + This maintains symmetry of the lowpass and highpass responses on a + logarithmic scale. + + .. versionadded:: 1.1.0 + + Examples + -------- + Use the 'zpk' (Zero-Pole-Gain) representation of a lowpass filter to + transform it to a highpass filter with a cutoff frequency wo. + + >>> from scipy.signal import lp2hp_zpk + >>> z = [ -2 + 3j , -0.5 - 0.8j ] + >>> p = [ -1 , -4 ] + >>> k = 10 + >>> wo = 0.6 + >>> lp2hp_zpk(z, p, k, wo) + ( array([-0.09230769-0.13846154j, -0.33707865+0.53932584j]), + array([-0.6 , -0.15]), + 8.5) + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) + + degree = _relative_degree(z, p) + + # Invert positions radially about unit circle to convert LPF to HPF + # Scale all points radially from origin to shift cutoff frequency + z_hp = wo / z + p_hp = wo / p + + # If lowpass had zeros at infinity, inverting moves them to origin. + z_hp = append(z_hp, zeros(degree)) + + # Cancel out gain change caused by inversion + k_hp = k * real(prod(-z) / prod(-p)) + + return z_hp, p_hp, k_hp + + +def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandpass filter. + + Return an analog band-pass filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, using zeros, poles, and gain ('zpk') representation. 
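+
+    Each prototype root :math:`s_0` is mapped to the pair
+    :math:`s_0 \cdot \mathrm{BW}/2 \pm \sqrt{(s_0 \cdot \mathrm{BW}/2)^2 - {\omega_0}^2}`,
+    so the band-pass filter has twice as many poles and zeros as the
+    prototype, plus additional zeros at the origin for any poles the
+    prototype has in excess of its zeros.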
+ + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired passband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired passband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + z : ndarray + Zeros of the transformed band-pass filter transfer function. + p : ndarray + Poles of the transformed band-pass filter transfer function. + k : float + System gain of the transformed band-pass filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear + lp2bp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} + + This is the "wideband" transformation, producing a passband with + geometric (log frequency) symmetry about `wo`. + + .. versionadded:: 1.1.0 + + Examples + -------- + Use the 'zpk' (Zero-Pole-Gain) representation of a lowpass filter to + transform it to a bandpass filter with a center frequency wo and + bandwidth bw. + + >>> from scipy.signal import lp2bp_zpk + >>> z = [ 5 + 2j , 5 - 2j ] + >>> p = [ 7 , -16 ] + >>> k = 0.8 + >>> wo = 0.62 + >>> bw = 15 + >>> lp2bp_zpk(z, p, k, wo, bw) + ( array([7.49955815e+01+3.00017676e+01j, 7.49955815e+01-3.00017676e+01j, + 4.41850748e-03-1.76761126e-03j, 4.41850748e-03+1.76761126e-03j]), + array([1.04996339e+02+0.j, -1.60167736e-03+0.j, 3.66108003e-03+0.j, + -2.39998398e+02+0.j]), 0.8) + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) + bw = float(bw) + + degree = _relative_degree(z, p) + + # Scale poles and zeros to desired bandwidth + z_lp = z * bw/2 + p_lp = p * bw/2 + + # Square root needs to produce complex result, not NaN + z_lp = z_lp.astype(complex) + p_lp = p_lp.astype(complex) + + # Duplicate poles and zeros and shift from baseband to +wo and -wo + z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2), + z_lp - sqrt(z_lp**2 - wo**2))) + p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2), + p_lp - sqrt(p_lp**2 - wo**2))) + + # Move degree zeros to origin, leaving degree zeros at infinity for BPF + z_bp = append(z_bp, zeros(degree)) + + # Cancel out gain change from frequency scaling + k_bp = k * bw**degree + + return z_bp, p_bp, k_bp + + +def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandstop filter. + + Return an analog band-stop filter with center frequency `wo` and + stopband width `bw` from an analog low-pass filter prototype with unity + cutoff frequency, using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired stopband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired stopband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + z : ndarray + Zeros of the transformed band-stop filter transfer function. + p : ndarray + Poles of the transformed band-stop filter transfer function. + k : float + System gain of the transformed band-stop filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear + lp2bs + + Notes + ----- + This is derived from the s-plane substitution + + .. 
math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} + + This is the "wideband" transformation, producing a stopband with + geometric (log frequency) symmetry about `wo`. + + .. versionadded:: 1.1.0 + + Examples + -------- + Transform a low-pass filter represented in 'zpk' (Zero-Pole-Gain) form + into a bandstop filter represented in 'zpk' form, with a center frequency wo and + bandwidth bw. + + >>> from scipy.signal import lp2bs_zpk + >>> z = [ ] + >>> p = [ 0.7 , -1 ] + >>> k = 9 + >>> wo = 0.5 + >>> bw = 10 + >>> lp2bs_zpk(z, p, k, wo, bw) + ( array([0.+0.5j, 0.+0.5j, 0.-0.5j, 0.-0.5j]), + array([14.2681928 +0.j, -0.02506281+0.j, 0.01752149+0.j, -9.97493719+0.j]), + -12.857142857142858) + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) + bw = float(bw) + + degree = _relative_degree(z, p) + + # Invert to a highpass filter with desired bandwidth + z_hp = (bw/2) / z + p_hp = (bw/2) / p + + # Square root needs to produce complex result, not NaN + z_hp = z_hp.astype(complex) + p_hp = p_hp.astype(complex) + + # Duplicate poles and zeros and shift from baseband to +wo and -wo + z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2), + z_hp - sqrt(z_hp**2 - wo**2))) + p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2), + p_hp - sqrt(p_hp**2 - wo**2))) + + # Move any zeros that were at infinity to the center of the stopband + z_bs = append(z_bs, full(degree, +1j*wo)) + z_bs = append(z_bs, full(degree, -1j*wo)) + + # Cancel out gain change caused by inversion + k_bs = k * real(prod(-z) / prod(-p)) + + return z_bs, p_bs, k_bs + + +def butter(N, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Butterworth digital and analog filter design. + + Design an Nth-order digital or analog Butterworth filter and return + the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. For 'bandpass' and 'bandstop' filters, + the resulting order of the final second-order sections ('sos') + matrix is ``2*N``, with `N` the number of biquad sections + of the desired system. + Wn : array_like + The critical frequency or frequencies. For lowpass and highpass + filters, Wn is a scalar; for bandpass and bandstop filters, + Wn is a length-2 sequence. + + For a Butterworth filter, this is the point at which the gain + drops to 1/sqrt(2) that of the passband (the "-3 dB point"). + + For digital filters, if `fs` is not specified, `Wn` units are + normalized from 0 to 1, where 1 is the Nyquist frequency (`Wn` is + thus in half cycles / sample and defined as 2*critical frequencies + / `fs`). If `fs` is specified, `Wn` is in the same units as `fs`. + + For analog filters, `Wn` is an angular frequency (e.g. rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. 
+ sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + buttord, buttap + + Notes + ----- + The Butterworth filter has maximally flat frequency response in the + passband. + + The ``'sos'`` output parameter was added in 0.16.0. + + If the transfer function form ``[b, a]`` is requested, numerical + problems can occur since the conversion between roots and + the polynomial coefficients is a numerically sensitive operation, + even for N >= 4. It is recommended to work with the SOS + representation. + + .. warning:: + Designing high-order and narrowband IIR filters in TF form can + result in unstable or incorrect filtering due to floating point + numerical precision issues. Consider inspecting output filter + characteristics `freqz` or designing the filters with second-order + sections via ``output='sos'``. + + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.butter(4, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Butterworth filter frequency response') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.butter(10, 15, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 15 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [s]') + >>> plt.tight_layout() + >>> plt.show() + """ + return iirfilter(N, Wn, btype=btype, analog=analog, + output=output, ftype='butter', fs=fs) + + +def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Chebyshev type I digital and analog filter design. + + Design an Nth-order digital or analog Chebyshev type I filter and + return the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rp : float + The maximum ripple allowed below unity gain in the passband. + Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For Type I filters, this is the point in the transition band at which + the gain first drops below -`rp`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. 
+ analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + cheb1ord, cheb1ap + + Notes + ----- + The Chebyshev type I filter maximizes the rate of cutoff between the + frequency response's passband and stopband, at the expense of ripple in + the passband and increased ringing in the step response. + + Type I filters roll off faster than Type II (`cheby2`), but Type II + filters do not have any ripple in the passband. + + The equiripple passband has N maxima or minima (for example, a + 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is + unity for odd-order filters, or -rp dB for even-order filters. + + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev Type I frequency response (rp=5)') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-5, color='green') # rp + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.cheby1(10, 1, 15, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 15 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [s]') + >>> plt.tight_layout() + >>> plt.show() + """ + return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, + output=output, ftype='cheby1', fs=fs) + + +def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Chebyshev type II digital and analog filter design. + + Design an Nth-order digital or analog Chebyshev type II filter and + return the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rs : float + The minimum attenuation required in the stop band. 
+ Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For Type II filters, this is the point in the transition band at which + the gain first reaches -`rs`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + cheb2ord, cheb2ap + + Notes + ----- + The Chebyshev type II filter maximizes the rate of cutoff between the + frequency response's passband and stopband, at the expense of ripple in + the stopband and increased ringing in the step response. + + Type II filters do not roll off as fast as Type I (`cheby1`). + + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev Type II frequency response (rs=40)') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-40, color='green') # rs + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and + apply it to the signal. 
(It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.cheby2(12, 20, 17, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 17 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [s]') + >>> plt.show() + """ + return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, + output=output, ftype='cheby2', fs=fs) + + +def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Elliptic (Cauer) digital and analog filter design. + + Design an Nth-order digital or analog elliptic filter and return + the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rp : float + The maximum ripple allowed below unity gain in the passband. + Specified in decibels, as a positive number. + rs : float + The minimum attenuation required in the stop band. + Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For elliptic filters, this is the point in the transition band at + which the gain first drops below -`rp`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + ellipord, ellipap + + Notes + ----- + Also known as Cauer or Zolotarev filters, the elliptical filter maximizes + the rate of transition between the frequency response's passband and + stopband, at the expense of ripple in both, and increased ringing in the + step response. + + As `rp` approaches 0, the elliptical filter becomes a Chebyshev + type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev + type I filter (`cheby1`). As both approach 0, it becomes a Butterworth + filter (`butter`). + + The equiripple passband has N maxima or minima (for example, a + 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is + unity for odd-order filters, or -rp dB for even-order filters. + + The ``'sos'`` output parameter was added in 0.16.0. 
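+
+    As a rough numerical check of the even-order DC gain noted above (the
+    concrete design values here are arbitrary, chosen only for illustration):
+
+    >>> from scipy import signal
+    >>> import numpy as np
+    >>> b, a = signal.ellip(4, 3, 40, 100, 'low', analog=True)
+    >>> bool(np.isclose(20 * np.log10(abs(b[-1] / a[-1])), -3.0))  # DC gain is -rp dB
+    True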
+ + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Elliptic filter frequency response (rp=5, rs=40)') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-40, color='green') # rs + >>> plt.axhline(-5, color='green') # rp + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.ellip(8, 1, 100, 17, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 17 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [s]') + >>> plt.tight_layout() + >>> plt.show() + """ + return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, + output=output, ftype='elliptic', fs=fs) + + +def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase', + fs=None): + """ + Bessel/Thomson digital and analog filter design. + + Design an Nth-order digital or analog Bessel filter and return the + filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies (defined + by the `norm` parameter). + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. (See Notes.) + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba'. + norm : {'phase', 'delay', 'mag'}, optional + Critical frequency normalization: + + ``phase`` + The filter is normalized such that the phase response reaches its + midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for + both low-pass and high-pass filters, so this is the + "phase-matched" case. + + The magnitude response asymptotes are the same as a Butterworth + filter of the same order with a cutoff of `Wn`. + + This is the default, and matches MATLAB's implementation. + + ``delay`` + The filter is normalized such that the group delay in the passband + is 1/`Wn` (e.g., seconds). This is the "natural" type obtained by + solving Bessel polynomials. 
+ + ``mag`` + The filter is normalized such that the gain magnitude is -3 dB at + angular frequency `Wn`. + + .. versionadded:: 0.18.0 + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + Notes + ----- + Also known as a Thomson filter, the analog Bessel filter has maximally + flat group delay and maximally linear phase response, with very little + ringing in the step response. [1]_ + + The Bessel is inherently an analog filter. This function generates digital + Bessel filters using the bilinear transform, which does not preserve the + phase response of the analog filter. As such, it is only approximately + correct at frequencies below about fs/4. To get maximally-flat group + delay at higher frequencies, the analog Bessel filter must be transformed + using phase-preserving techniques. + + See `besselap` for implementation details and references. + + The ``'sos'`` output parameter was added in 0.16.0. + + References + ---------- + .. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency + Characteristics", Proceedings of the Institution of Electrical + Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. + + Examples + -------- + Plot the phase-normalized frequency response, showing the relationship + to the Butterworth's cutoff frequency (green): + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> b, a = signal.butter(4, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed') + >>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase') + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) + >>> plt.title('Bessel filter magnitude response (with Butterworth)') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.show() + + and the phase midpoint: + + >>> plt.figure() + >>> plt.semilogx(w, np.unwrap(np.angle(h))) + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-np.pi, color='red') # phase midpoint + >>> plt.title('Bessel filter phase response') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Phase [rad]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.show() + + Plot the magnitude-normalized frequency response, showing the -3 dB cutoff: + + >>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag') + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) + >>> plt.axhline(-3, color='red') # -3 dB magnitude + >>> plt.axvline(10, color='green') # cutoff frequency + >>> plt.title('Amplitude-normalized Bessel filter frequency response') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.show() + + Plot the delay-normalized filter, showing the maximally-flat group delay + at 0.1 seconds: + + >>> b, a = signal.bessel(5, 
1/0.1, 'low', analog=True, norm='delay') + >>> w, h = signal.freqs(b, a) + >>> plt.figure() + >>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w)) + >>> plt.axhline(0.1, color='red') # 0.1 seconds group delay + >>> plt.title('Bessel filter group delay') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Group delay [s]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.show() + + """ + return iirfilter(N, Wn, btype=btype, analog=analog, + output=output, ftype='bessel_'+norm, fs=fs) + + +def maxflat(): + pass + + +def yulewalk(): + pass + + +def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type): + """ + Band Stop Objective Function for order minimization. + + Returns the non-integer order for an analog band stop filter. + + Parameters + ---------- + wp : scalar + Edge of passband `passb`. + ind : int, {0, 1} + Index specifying which `passb` edge to vary (0 or 1). + passb : ndarray + Two element sequence of fixed passband edges. + stopb : ndarray + Two element sequence of fixed stopband edges. + gstop : float + Amount of attenuation in stopband in dB. + gpass : float + Amount of ripple in the passband in dB. + type : {'butter', 'cheby', 'ellip'} + Type of filter. + + Returns + ------- + n : scalar + Filter order (possibly non-integer). + + Notes + ----- + Band-stop filters are used in applications where certain frequency + components need to be blocked while others are allowed; for instance, + removing noise at specific frequencies while allowing the desired signal + to pass through. The order of a filter often determines its complexity and + accuracy. Determining the right order can be a challenge. This function + aims to provide an appropriate order for an analog band stop filter. + + Examples + -------- + + >>> import numpy as np + >>> from scipy.signal import band_stop_obj + >>> wp = 2 + >>> ind = 1 + >>> passb = np.array([1, 3]) + >>> stopb = np.array([0.5, 4]) + >>> gstop = 30 + >>> gpass = 3 + >>> filter_type = 'butter' + >>> band_stop_obj(wp, ind, passb, stopb, gpass, gstop, filter_type) + np.float64(-2.758504160760643) + + """ + + _validate_gpass_gstop(gpass, gstop) + + passbC = passb.copy() + passbC[ind] = wp + nat = (stopb * (passbC[0] - passbC[1]) / + (stopb ** 2 - passbC[0] * passbC[1])) + nat = min(abs(nat)) + + if type == 'butter': + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))) + elif type == 'cheby': + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat) + elif type == 'ellip': + GSTOP = 10 ** (0.1 * gstop) + GPASS = 10 ** (0.1 * gpass) + arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) + arg0 = 1.0 / nat + d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) + d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) + n = (d0[0] * d1[1] / (d0[1] * d1[0])) + else: + raise ValueError(f"Incorrect type: {type}") + return n + + +def _pre_warp(wp, ws, analog): + # Pre-warp frequencies for digital filter design + if not analog: + passb = np.tan(pi * wp / 2.0) + stopb = np.tan(pi * ws / 2.0) + else: + passb = wp * 1.0 + stopb = ws * 1.0 + return passb, stopb + + +def _validate_wp_ws(wp, ws, fs, analog): + wp = atleast_1d(wp) + ws = atleast_1d(ws) + if fs is not None: + if analog: + raise ValueError("fs cannot be specified for an analog filter") + wp = 2 * wp / fs + ws = 2 * ws / fs + + filter_type = 2 * (len(wp) - 1) + 1 + if wp[0] >= ws[0]: + filter_type += 1 + + return wp, ws, 
filter_type + + +def _find_nat_freq(stopb, passb, gpass, gstop, filter_type, filter_kind): + if filter_type == 1: # low + nat = stopb / passb + elif filter_type == 2: # high + nat = passb / stopb + elif filter_type == 3: # stop + + ### breakpoint() + + wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, + args=(0, passb, stopb, gpass, gstop, + filter_kind), + disp=0) + passb[0] = wp0 + wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], + args=(1, passb, stopb, gpass, gstop, + filter_kind), + disp=0) + passb[1] = wp1 + nat = ((stopb * (passb[0] - passb[1])) / + (stopb ** 2 - passb[0] * passb[1])) + elif filter_type == 4: # pass + nat = ((stopb ** 2 - passb[0] * passb[1]) / + (stopb * (passb[0] - passb[1]))) + else: + raise ValueError(f"should not happen: {filter_type =}.") + + nat = min(abs(nat)) + return nat, passb + + +def _postprocess_wn(WN, analog, fs): + wn = WN if analog else np.arctan(WN) * 2.0 / pi + if len(wn) == 1: + wn = wn[0] + if fs is not None: + wn = wn * fs / 2 + return wn + + +def buttord(wp, ws, gpass, gstop, analog=False, fs=None): + """Butterworth filter order selection. + + Return the order of the lowest order digital or analog Butterworth filter + that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for a Butterworth filter which meets specs. + wn : ndarray or float + The Butterworth natural frequency (i.e. the "3dB frequency"). Should + be used with `butter` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `butter`. + + See Also + -------- + butter : Filter design using order and critical points + cheb1ord : Find order and critical points from passband and stopband spec + cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design an analog bandpass filter with passband within 3 dB from 20 to + 50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s. + Plot its frequency response, showing the passband and stopband + constraints in gray. 
+ + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True) + >>> b, a = signal.butter(N, Wn, 'band', True) + >>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500)) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Butterworth bandpass filter fit to constraints') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop + >>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass + >>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop + >>> plt.axis([10, 100, -60, 3]) + >>> plt.show() + + """ + _validate_gpass_gstop(gpass, gstop) + fs = _validate_fs(fs, allow_none=True) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'butter') + + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))) + + # Find the Butterworth natural frequency WN (or the "3dB" frequency") + # to give exactly gpass at passb. + try: + W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord)) + except ZeroDivisionError: + W0 = 1.0 + warnings.warn("Order is zero...check input parameters.", + RuntimeWarning, stacklevel=2) + + # now convert this frequency back from lowpass prototype + # to the original analog filter + + if filter_type == 1: # low + WN = W0 * passb + elif filter_type == 2: # high + WN = passb / W0 + elif filter_type == 3: # stop + WN = np.empty(2, float) + discr = sqrt((passb[1] - passb[0]) ** 2 + + 4 * W0 ** 2 * passb[0] * passb[1]) + WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0) + WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0) + WN = np.sort(abs(WN)) + elif filter_type == 4: # pass + W0 = np.array([-W0, W0], float) + WN = (-W0 * (passb[1] - passb[0]) / 2.0 + + sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 + + passb[0] * passb[1])) + WN = np.sort(abs(WN)) + else: + raise ValueError(f"Bad type: {filter_type}") + + wn = _postprocess_wn(WN, analog, fs) + + return ord, wn + + +def cheb1ord(wp, ws, gpass, gstop, analog=False, fs=None): + """Chebyshev type I filter order selection. + + Return the order of the lowest order digital or analog Chebyshev Type I + filter that loses no more than `gpass` dB in the passband and has at + least `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. 
versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for a Chebyshev type I filter that meets specs. + wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `cheby1` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `cheby1`. + + See Also + -------- + cheby1 : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design a digital lowpass filter such that the passband is within 3 dB up + to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its + frequency response, showing the passband and stopband constraints in gray. + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40) + >>> b, a = signal.cheby1(N, 3, Wn, 'low') + >>> w, h = signal.freqz(b, a) + >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev I lowpass filter fit to constraints') + >>> plt.xlabel('Normalized frequency') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop + >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # pass + >>> plt.axis([0.08, 1, -60, 3]) + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'cheby') + + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + v_pass_stop = np.arccosh(np.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) + ord = int(ceil(v_pass_stop / np.arccosh(nat))) + + # Natural frequencies are just the passband edges + wn = _postprocess_wn(passb, analog, fs) + + return ord, wn + + +def cheb2ord(wp, ws, gpass, gstop, analog=False, fs=None): + """Chebyshev type II filter order selection. + + Return the order of the lowest order digital or analog Chebyshev Type II + filter that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for a Chebyshev type II filter that meets specs. 
+ wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `cheby2` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `cheby2`. + + See Also + -------- + cheby2 : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb1ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to + 0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above + 0.6*(fs/2). Plot its frequency response, showing the passband and + stopband constraints in gray. + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) + >>> b, a = signal.cheby2(N, 60, Wn, 'stop') + >>> w, h = signal.freqz(b, a) + >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev II bandstop filter fit to constraints') + >>> plt.xlabel('Normalized frequency') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop + >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # pass + >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # stop + >>> plt.axis([0.06, 1, -80, 3]) + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'cheby') + + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + v_pass_stop = np.arccosh(np.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) + ord = int(ceil(v_pass_stop / arccosh(nat))) + + # Find frequency where analog response is -gpass dB. + # Then convert back from low-pass prototype to the original filter. + + new_freq = cosh(1.0 / ord * v_pass_stop) + new_freq = 1.0 / new_freq + + if filter_type == 1: + nat = passb / new_freq + elif filter_type == 2: + nat = passb * new_freq + elif filter_type == 3: + nat = np.empty(2, float) + nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) + + sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 + + passb[1] * passb[0])) + nat[1] = passb[1] * passb[0] / nat[0] + elif filter_type == 4: + nat = np.empty(2, float) + nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) + + sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) + + passb[1] * passb[0])) + nat[1] = passb[0] * passb[1] / nat[0] + + wn = _postprocess_wn(nat, analog, fs) + + return ord, wn + + +_POW10_LOG10 = np.log(10) + + +def _pow10m1(x): + """10 ** x - 1 for x near 0""" + return np.expm1(_POW10_LOG10 * x) + + +def ellipord(wp, ws, gpass, gstop, analog=False, fs=None): + """Elliptic (Cauer) filter order selection. + + Return the order of the lowest order digital or analog elliptic filter + that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. 
(`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for an Elliptic (Cauer) filter that meets specs. + wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `ellip` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `ellip`. + + See Also + -------- + ellip : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design an analog highpass filter such that the passband is within 3 dB + above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its + frequency response, showing the passband and stopband constraints in gray. + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> N, Wn = signal.ellipord(30, 10, 3, 60, True) + >>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True) + >>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500)) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Elliptical highpass filter fit to constraints') + >>> plt.xlabel('Frequency [rad/s]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop + >>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass + >>> plt.axis([1, 300, -80, 3]) + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'ellip') + + arg1_sq = _pow10m1(0.1 * gpass) / _pow10m1(0.1 * gstop) + arg0 = 1.0 / nat + d0 = special.ellipk(arg0 ** 2), special.ellipkm1(arg0 ** 2) + d1 = special.ellipk(arg1_sq), special.ellipkm1(arg1_sq) + ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0]))) + + wn = _postprocess_wn(passb, analog, fs) + + return ord, wn + + +def buttap(N): + """Return (z,p,k) for analog prototype of Nth-order Butterworth filter. + + The filter will have an angular (e.g., rad/s) cutoff frequency of 1. + + See Also + -------- + butter : Filter design function using this prototype + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + z = np.array([]) + m = np.arange(-N+1, N, 2) + # Middle value is 0 to ensure an exactly real pole + p = -np.exp(1j * pi * m / (2 * N)) + k = 1 + return z, p, k + + +def cheb1ap(N, rp): + """ + Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter. + + The returned filter prototype has `rp` decibels of ripple in the passband. + + The filter's angular (e.g. 
rad/s) cutoff frequency is normalized to 1, + defined as the point at which the gain first drops below ``-rp``. + + See Also + -------- + cheby1 : Filter design function using this prototype + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero error + # Even order filters have DC gain of -rp dB + return np.array([]), np.array([]), 10**(-rp/20) + z = np.array([]) + + # Ripple factor (epsilon) + eps = np.sqrt(10 ** (0.1 * rp) - 1.0) + mu = 1.0 / N * arcsinh(1 / eps) + + # Arrange poles in an ellipse on the left half of the S-plane + m = np.arange(-N+1, N, 2) + theta = pi * m / (2*N) + p = -sinh(mu + 1j*theta) + + k = np.prod(-p, axis=0).real + if N % 2 == 0: + k = k / sqrt(1 + eps * eps) + + return z, p, k + + +def cheb2ap(N, rs): + """ + Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter. + + The returned filter prototype has attenuation of at least ``rs`` decibels + in the stopband. + + The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, + defined as the point at which the attenuation first reaches ``rs``. + + See Also + -------- + cheby2 : Filter design function using this prototype + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero warning + return np.array([]), np.array([]), 1 + + # Ripple factor (epsilon) + de = 1.0 / sqrt(10 ** (0.1 * rs) - 1) + mu = arcsinh(1.0 / de) / N + + if N % 2: + m = np.concatenate((np.arange(-N+1, 0, 2), np.arange(2, N, 2))) + else: + m = np.arange(-N+1, N, 2) + + z = -conjugate(1j / sin(m * pi / (2.0 * N))) + + # Poles around the unit circle like Butterworth + p = -exp(1j * pi * np.arange(-N+1, N, 2) / (2 * N)) + # Warp into Chebyshev II + p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag + p = 1.0 / p + + k = (np.prod(-p, axis=0) / np.prod(-z, axis=0)).real + return z, p, k + + +EPSILON = 2e-16 + +# number of terms in solving degree equation +_ELLIPDEG_MMAX = 7 + + +def _ellipdeg(n, m1): + """Solve degree equation using nomes + + Given n, m1, solve + n * K(m) / K'(m) = K1(m1) / K1'(m1) + for m + + See [1], Eq. (49) + + References + ---------- + .. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design", + https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf + """ + K1 = special.ellipk(m1) + K1p = special.ellipkm1(m1) + + q1 = np.exp(-np.pi * K1p / K1) + q = q1 ** (1/n) + + mnum = np.arange(_ELLIPDEG_MMAX + 1) + mden = np.arange(1, _ELLIPDEG_MMAX + 2) + + num = np.sum(q ** (mnum * (mnum+1))) + den = 1 + 2 * np.sum(q ** (mden**2)) + + return 16 * q * (num / den) ** 4 + + +# Maximum number of iterations in Landen transformation recursion +# sequence. 10 is conservative; unit tests pass with 4, Orfanidis +# (see _arc_jac_cn [1]) suggests 5. +_ARC_JAC_SN_MAXITER = 10 + + +def _arc_jac_sn(w, m): + """Inverse Jacobian elliptic sn + + Solve for z in w = sn(z, m) + + Parameters + ---------- + w : complex scalar + argument + + m : scalar + modulus; in interval [0, 1] + + + See [1], Eq. (56) + + References + ---------- + .. 
[1] Orfanidis, "Lecture Notes on Elliptic Filter Design", + https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf + + """ + + def _complement(kx): + # (1-k**2) ** 0.5; the expression below + # works for small kx + return ((1 - kx) * (1 + kx)) ** 0.5 + + k = m ** 0.5 + + if k > 1: + return np.nan + elif k == 1: + return np.arctanh(w) + + ks = [k] + niter = 0 + while ks[-1] != 0: + k_ = ks[-1] + k_p = _complement(k_) + ks.append((1 - k_p) / (1 + k_p)) + niter += 1 + if niter > _ARC_JAC_SN_MAXITER: + raise ValueError('Landen transformation not converging') + + K = np.prod(1 + np.array(ks[1:])) * np.pi/2 + + wns = [w] + + for kn, knext in zip(ks[:-1], ks[1:]): + wn = wns[-1] + wnext = (2 * wn / + ((1 + knext) * (1 + _complement(kn * wn)))) + wns.append(wnext) + + u = 2 / np.pi * np.arcsin(wns[-1]) + + z = K * u + return z + + +def _arc_jac_sc1(w, m): + """Real inverse Jacobian sc, with complementary modulus + + Solve for z in w = sc(z, 1-m) + + w - real scalar + + m - modulus + + From [1], sc(z, m) = -i * sn(i * z, 1 - m) + + References + ---------- + # noqa: E501 + .. [1] https://functions.wolfram.com/EllipticFunctions/JacobiSC/introductions/JacobiPQs/ShowAll.html, + "Representations through other Jacobi functions" + + """ + + zcomplex = _arc_jac_sn(1j * w, m) + if abs(zcomplex.real) > 1e-14: + raise ValueError + + return zcomplex.imag + + +def ellipap(N, rp, rs): + """Return (z,p,k) of Nth-order elliptic analog lowpass filter. + + The filter is a normalized prototype that has `rp` decibels of ripple + in the passband and a stopband `rs` decibels down. + + The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1, + defined as the point at which the gain first drops below ``-rp``. + + See Also + -------- + ellip : Filter design function using this prototype + + References + ---------- + .. [1] Lutovac, Tosic, and Evans, "Filter Design for Signal Processing", + Chapters 5 and 12. + + .. [2] Orfanidis, "Lecture Notes on Elliptic Filter Design", + https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero warning + # Even order filters have DC gain of -rp dB + return np.array([]), np.array([]), 10**(-rp/20) + elif N == 1: + p = -sqrt(1.0 / _pow10m1(0.1 * rp)) + k = -p + z = [] + return asarray(z), asarray(p), k + + eps_sq = _pow10m1(0.1 * rp) + + eps = np.sqrt(eps_sq) + ck1_sq = eps_sq / _pow10m1(0.1 * rs) + if ck1_sq == 0: + raise ValueError("Cannot design a filter with given rp and rs" + " specifications.") + + val = special.ellipk(ck1_sq), special.ellipkm1(ck1_sq) + + m = _ellipdeg(N, ck1_sq) + + capk = special.ellipk(m) + + j = np.arange(1 - N % 2, N, 2) + jj = len(j) + + [s, c, d, phi] = special.ellipj(j * capk / N, m * np.ones(jj)) + snew = np.compress(abs(s) > EPSILON, s, axis=-1) + z = 1.0 / (sqrt(m) * snew) + z = 1j * z + z = np.concatenate((z, conjugate(z))) + + r = _arc_jac_sc1(1. 
/ eps, ck1_sq) + v0 = capk * r / (N * val[0]) + + [sv, cv, dv, phi] = special.ellipj(v0, 1 - m) + p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) + + if N % 2: + newp = np.compress( + abs(p.imag) > EPSILON * np.sqrt(np.sum(p * np.conjugate(p), axis=0).real), + p, axis=-1 + ) + p = np.concatenate((p, conjugate(newp))) + else: + p = np.concatenate((p, conjugate(p))) + + k = (np.prod(-p, axis=0) / np.prod(-z, axis=0)).real + if N % 2 == 0: + k = k / np.sqrt(1 + eps_sq) + + return z, p, k + + +# TODO: Make this a real public function scipy.misc.ff +def _falling_factorial(x, n): + r""" + Return the factorial of `x` to the `n` falling. + + This is defined as: + + .. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1) + + This can more efficiently calculate ratios of factorials, since: + + n!/m! == falling_factorial(n, n-m) + + where n >= m + + skipping the factors that cancel out + + the usual factorial n! == ff(n, n) + """ + val = 1 + for k in range(x - n + 1, x + 1): + val *= k + return val + + +def _bessel_poly(n, reverse=False): + """ + Return the coefficients of Bessel polynomial of degree `n` + + If `reverse` is true, a reverse Bessel polynomial is output. + + Output is a list of coefficients: + [1] = 1 + [1, 1] = 1*s + 1 + [1, 3, 3] = 1*s^2 + 3*s + 3 + [1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15 + [1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105 + etc. + + Output is a Python list of arbitrary precision long ints, so n is only + limited by your hardware's memory. + + Sequence is http://oeis.org/A001498, and output can be confirmed to + match http://oeis.org/A001498/b001498.txt : + + >>> from scipy.signal._filter_design import _bessel_poly + >>> i = 0 + >>> for n in range(51): + ... for x in _bessel_poly(n, reverse=True): + ... print(i, x) + ... i += 1 + + """ + if abs(int(n)) != n: + raise ValueError("Polynomial order must be a nonnegative integer") + else: + n = int(n) # np.int32 doesn't work, for instance + + out = [] + for k in range(n + 1): + num = _falling_factorial(2*n - k, n) + den = 2**(n - k) * math.factorial(k) + out.append(num // den) + + if reverse: + return out[::-1] + else: + return out + + +def _campos_zeros(n): + """ + Return approximate zero locations of Bessel polynomials y_n(x) for order + `n` using polynomial fit (Campos-Calderon 2011) + """ + if n == 1: + return asarray([-1+0j]) + + s = npp_polyval(n, [0, 0, 2, 0, -3, 1]) + b3 = npp_polyval(n, [16, -8]) / s + b2 = npp_polyval(n, [-24, -12, 12]) / s + b1 = npp_polyval(n, [8, 24, -12, -2]) / s + b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s + + r = npp_polyval(n, [0, 0, 2, 1]) + a1 = npp_polyval(n, [-6, -6]) / r + a2 = 6 / r + + k = np.arange(1, n+1) + x = npp_polyval(k, [0, a1, a2]) + y = npp_polyval(k, [b0, b1, b2, b3]) + + return x + 1j*y + + +def _aberth(f, fp, x0, tol=1e-15, maxiter=50): + """ + Given a function `f`, its first derivative `fp`, and a set of initial + guesses `x0`, simultaneously find the roots of the polynomial using the + Aberth-Ehrlich method. + + ``len(x0)`` should equal the number of roots of `f`. + + (This is not a complete implementation of Bini's algorithm.) 
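+
+    A minimal illustrative check (the cubic and the rough complex starting
+    guesses below are arbitrary choices, not part of the filter-design path):
+
+    >>> from scipy.signal._filter_design import _aberth
+    >>> import numpy as np
+    >>> f = lambda x: x**3 - 1
+    >>> fp = lambda x: 3 * x**2
+    >>> x0 = np.array([1.2 + 0.1j, -0.4 + 0.9j, -0.4 - 0.9j])
+    >>> bool(np.allclose(np.sort_complex(_aberth(f, fp, x0)),
+    ...                  np.sort_complex(np.roots([1, 0, 0, -1]))))
+    True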
+ """ + + N = len(x0) + + x = array(x0, complex) + beta = np.empty_like(x0) + + for iteration in range(maxiter): + alpha = -f(x) / fp(x) # Newton's method + + # Model "repulsion" between zeros + for k in range(N): + beta[k] = np.sum(1/(x[k] - x[k+1:])) + beta[k] += np.sum(1/(x[k] - x[:k])) + + x += alpha / (1 + alpha * beta) + + if not all(np.isfinite(x)): + raise RuntimeError('Root-finding calculation failed') + + # Mekwi: The iterative process can be stopped when |hn| has become + # less than the largest error one is willing to permit in the root. + if all(abs(alpha) <= tol): + break + else: + raise Exception('Zeros failed to converge') + + return x + + +def _bessel_zeros(N): + """ + Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of + modified Bessel function of the second kind + """ + if N == 0: + return asarray([]) + + # Generate starting points + x0 = _campos_zeros(N) + + # Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary + # Bessel polynomial y_N(x) + def f(x): + return special.kve(N+0.5, 1/x) + + # First derivative of above + def fp(x): + return (special.kve(N-0.5, 1/x)/(2*x**2) - + special.kve(N+0.5, 1/x)/(x**2) + + special.kve(N+1.5, 1/x)/(2*x**2)) + + # Starting points converge to true zeros + x = _aberth(f, fp, x0) + + # Improve precision using Newton's method on each + for i in range(len(x)): + x[i] = optimize.newton(f, x[i], fp, tol=1e-15) + + # Average complex conjugates to make them exactly symmetrical + x = np.mean((x, x[::-1].conj()), 0) + + # Zeros should sum to -1 + if abs(np.sum(x) + 1) > 1e-15: + raise RuntimeError('Generated zeros are inaccurate') + + return x + + +def _norm_factor(p, k): + """ + Numerically find frequency shift to apply to delay-normalized filter such + that -3 dB point is at 1 rad/sec. + + `p` is an array_like of polynomial poles + `k` is a float gain + + First 10 values are listed in "Bessel Scale Factors" table, + "Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond." + """ + p = asarray(p, dtype=complex) + + def G(w): + """ + Gain of filter + """ + return abs(k / prod(1j*w - p)) + + def cutoff(w): + """ + When gain = -3 dB, return 0 + """ + return G(w) - 1/np.sqrt(2) + + return optimize.newton(cutoff, 1.5) + + +def besselap(N, norm='phase'): + """ + Return (z,p,k) for analog prototype of an Nth-order Bessel filter. + + Parameters + ---------- + N : int + The order of the filter. + norm : {'phase', 'delay', 'mag'}, optional + Frequency normalization: + + ``phase`` + The filter is normalized such that the phase response reaches its + midpoint at an angular (e.g., rad/s) cutoff frequency of 1. This + happens for both low-pass and high-pass filters, so this is the + "phase-matched" case. [6]_ + + The magnitude response asymptotes are the same as a Butterworth + filter of the same order with a cutoff of `Wn`. + + This is the default, and matches MATLAB's implementation. + + ``delay`` + The filter is normalized such that the group delay in the passband + is 1 (e.g., 1 second). This is the "natural" type obtained by + solving Bessel polynomials + + ``mag`` + The filter is normalized such that the gain magnitude is -3 dB at + angular frequency 1. This is called "frequency normalization" by + Bond. [1]_ + + .. versionadded:: 0.18.0 + + Returns + ------- + z : ndarray + Zeros of the transfer function. Is always an empty array. + p : ndarray + Poles of the transfer function. + k : scalar + Gain of the transfer function. For phase-normalized, this is always 1. 
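+        For ``norm='delay'`` it equals ``(2*N)! / (2**N * N!)``, the
+        constant term of the reverse Bessel polynomial.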
+ + See Also + -------- + bessel : Filter design function using this prototype + + Notes + ----- + To find the pole locations, approximate starting points are generated [2]_ + for the zeros of the ordinary Bessel polynomial [3]_, then the + Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to + calculate more accurate zeros, and these locations are then inverted about + the unit circle. + + References + ---------- + .. [1] C.R. Bond, "Bessel Filter Constants", + http://www.crbond.com/papers/bsf.pdf + .. [2] Campos and Calderon, "Approximate closed-form formulas for the + zeros of the Bessel Polynomials", :arXiv:`1105.0957`. + .. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency + Characteristics", Proceedings of the Institution of Electrical + Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. + .. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial + Simultaneously", Mathematics of Computation, Vol. 27, No. 122, + April 1973 + .. [5] Ehrlich, "A modified Newton method for polynomials", Communications + of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967, + :DOI:`10.1145/363067.363115` + .. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to + Others", RaneNote 147, 1998, + https://www.ranecommercial.com/legacy/note147.html + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + + N = int(N) # calculation below doesn't always fit in np.int64 + if N == 0: + p = [] + k = 1 + else: + # Find roots of reverse Bessel polynomial + p = 1/_bessel_zeros(N) + + a_last = _falling_factorial(2*N, N) // 2**N + + # Shift them to a different normalization if required + if norm in ('delay', 'mag'): + # Normalized for group delay of 1 + k = a_last + if norm == 'mag': + # -3 dB magnitude point is at 1 rad/sec + norm_factor = _norm_factor(p, k) + p /= norm_factor + k = norm_factor**-N * a_last + elif norm == 'phase': + # Phase-matched (1/2 max phase shift at 1 rad/sec) + # Asymptotes are same as Butterworth filter + p *= 10**(-math.log10(a_last)/N) + k = 1 + else: + raise ValueError('normalization not understood') + + return asarray([]), asarray(p, dtype=complex), float(k) + + +def iirnotch(w0, Q, fs=2.0): + """ + Design second-order IIR notch digital filter. + + A notch filter is a band-stop filter with a narrow bandwidth + (high quality factor). It rejects a narrow frequency band and + leaves the rest of the spectrum little changed. + + Parameters + ---------- + w0 : float + Frequency to remove from a signal. If `fs` is specified, this is in + the same units as `fs`. By default, it is a normalized scalar that must + satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the + sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + See Also + -------- + iirpeak + + Notes + ----- + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Sophocles J. 
Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996 + + Examples + -------- + Design and plot filter to remove the 60 Hz component from a + signal sampled at 200 Hz, using a quality factor Q = 30 + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> fs = 200.0 # Sample frequency (Hz) + >>> f0 = 60.0 # Frequency to be removed from signal (Hz) + >>> Q = 30.0 # Quality factor + >>> # Design notch filter + >>> b, a = signal.iirnotch(f0, Q, fs) + + >>> # Frequency response + >>> freq, h = signal.freqz(b, a, fs=fs) + >>> # Plot + >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) + >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue') + >>> ax[0].set_title("Frequency Response") + >>> ax[0].set_ylabel("Amplitude [dB]", color='blue') + >>> ax[0].set_xlim([0, 100]) + >>> ax[0].set_ylim([-25, 10]) + >>> ax[0].grid(True) + >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') + >>> ax[1].set_ylabel("Phase [deg]", color='green') + >>> ax[1].set_xlabel("Frequency [Hz]") + >>> ax[1].set_xlim([0, 100]) + >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) + >>> ax[1].set_ylim([-90, 90]) + >>> ax[1].grid(True) + >>> plt.show() + """ + + return _design_notch_peak_filter(w0, Q, "notch", fs) + + +def iirpeak(w0, Q, fs=2.0): + """ + Design second-order IIR peak (resonant) digital filter. + + A peak filter is a band-pass filter with a narrow bandwidth + (high quality factor). It rejects components outside a narrow + frequency band. + + Parameters + ---------- + w0 : float + Frequency to be retained in a signal. If `fs` is specified, this is in + the same units as `fs`. By default, it is a normalized scalar that must + satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the + sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + peak filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + See Also + -------- + iirnotch + + Notes + ----- + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Sophocles J. 
Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996 + + Examples + -------- + Design and plot filter to remove the frequencies other than the 300 Hz + component from a signal sampled at 1000 Hz, using a quality factor Q = 30 + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> fs = 1000.0 # Sample frequency (Hz) + >>> f0 = 300.0 # Frequency to be retained (Hz) + >>> Q = 30.0 # Quality factor + >>> # Design peak filter + >>> b, a = signal.iirpeak(f0, Q, fs) + + >>> # Frequency response + >>> freq, h = signal.freqz(b, a, fs=fs) + >>> # Plot + >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) + >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue') + >>> ax[0].set_title("Frequency Response") + >>> ax[0].set_ylabel("Amplitude [dB]", color='blue') + >>> ax[0].set_xlim([0, 500]) + >>> ax[0].set_ylim([-50, 10]) + >>> ax[0].grid(True) + >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') + >>> ax[1].set_ylabel("Phase [deg]", color='green') + >>> ax[1].set_xlabel("Frequency [Hz]") + >>> ax[1].set_xlim([0, 500]) + >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) + >>> ax[1].set_ylim([-90, 90]) + >>> ax[1].grid(True) + >>> plt.show() + """ + + return _design_notch_peak_filter(w0, Q, "peak", fs) + + +def _design_notch_peak_filter(w0, Q, ftype, fs=2.0): + """ + Design notch or peak digital filter. + + Parameters + ---------- + w0 : float + Normalized frequency to remove from a signal. If `fs` is specified, + this is in the same units as `fs`. By default, it is a normalized + scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` + corresponding to half of the sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + ftype : str + The type of IIR filter to design: + + - notch filter : ``notch`` + - peak filter : ``peak`` + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0: + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + """ + fs = _validate_fs(fs, allow_none=False) + + # Guarantee that the inputs are floats + w0 = float(w0) + Q = float(Q) + w0 = 2*w0/fs + + # Checks if w0 is within the range + if w0 > 1.0 or w0 < 0.0: + raise ValueError("w0 should be such that 0 < w0 < 1") + + # Get bandwidth + bw = w0/Q + + # Normalize inputs + bw = bw*np.pi + w0 = w0*np.pi + + if ftype not in ("notch", "peak"): + raise ValueError("Unknown ftype.") + + # Compute beta according to Eqs. 11.3.4 (p.575) and 11.3.19 (p.579) from + # reference [1]. Due to assuming a -3 dB attenuation value, i.e, assuming + # gb = 1 / np.sqrt(2), the following terms simplify to: + # (np.sqrt(1.0 - gb**2.0) / gb) = 1 + # (gb / np.sqrt(1.0 - gb**2.0)) = 1 + beta = np.tan(bw/2.0) + + # Compute gain: formula 11.3.6 (p.575) from reference [1] + gain = 1.0/(1.0+beta) + + # Compute numerator b and denominator a + # formulas 11.3.7 (p.575) and 11.3.21 (p.579) + # from reference [1] + if ftype == "notch": + b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0]) + else: + b = (1.0-gain)*np.array([1.0, 0.0, -1.0]) + a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)]) + + return b, a + + +def iircomb(w0, Q, ftype='notch', fs=2.0, *, pass_zero=False): + """ + Design IIR notching or peaking digital comb filter. 
+ + A notching comb filter consists of regularly-spaced band-stop filters with + a narrow bandwidth (high quality factor). Each rejects a narrow frequency + band and leaves the rest of the spectrum little changed. + + A peaking comb filter consists of regularly-spaced band-pass filters with + a narrow bandwidth (high quality factor). Each rejects components outside + a narrow frequency band. + + Parameters + ---------- + w0 : float + The fundamental frequency of the comb filter (the spacing between its + peaks). This must evenly divide the sampling frequency. If `fs` is + specified, this is in the same units as `fs`. By default, it is + a normalized scalar that must satisfy ``0 < w0 < 1``, with + ``w0 = 1`` corresponding to half of the sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + ftype : {'notch', 'peak'} + The type of comb filter generated by the function. If 'notch', then + the Q factor applies to the notches. If 'peak', then the Q factor + applies to the peaks. Default is 'notch'. + fs : float, optional + The sampling frequency of the signal. Default is 2.0. + pass_zero : bool, optional + If False (default), the notches (nulls) of the filter are centered on + frequencies [0, w0, 2*w0, ...], and the peaks are centered on the + midpoints [w0/2, 3*w0/2, 5*w0/2, ...]. If True, the peaks are centered + on [0, w0, 2*w0, ...] (passing zero frequency) and vice versa. + + .. versionadded:: 1.9.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + Raises + ------ + ValueError + If `w0` is less than or equal to 0 or greater than or equal to + ``fs/2``, if `fs` is not divisible by `w0`, if `ftype` + is not 'notch' or 'peak' + + See Also + -------- + iirnotch + iirpeak + + Notes + ----- + For implementation details, see [1]_. The TF implementation of the + comb filter is numerically stable even at higher orders due to the + use of a single repeated pole, which won't suffer from precision loss. + + References + ---------- + .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996, ch. 
11, "Digital Filter Design" + + Examples + -------- + Design and plot notching comb filter at 20 Hz for a + signal sampled at 200 Hz, using quality factor Q = 30 + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> fs = 200.0 # Sample frequency (Hz) + >>> f0 = 20.0 # Frequency to be removed from signal (Hz) + >>> Q = 30.0 # Quality factor + >>> # Design notching comb filter + >>> b, a = signal.iircomb(f0, Q, ftype='notch', fs=fs) + + >>> # Frequency response + >>> freq, h = signal.freqz(b, a, fs=fs) + >>> response = abs(h) + >>> # To avoid divide by zero when graphing + >>> response[response == 0] = 1e-20 + >>> # Plot + >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True) + >>> ax[0].plot(freq, 20*np.log10(abs(response)), color='blue') + >>> ax[0].set_title("Frequency Response") + >>> ax[0].set_ylabel("Amplitude [dB]", color='blue') + >>> ax[0].set_xlim([0, 100]) + >>> ax[0].set_ylim([-30, 10]) + >>> ax[0].grid(True) + >>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green') + >>> ax[1].set_ylabel("Phase [deg]", color='green') + >>> ax[1].set_xlabel("Frequency [Hz]") + >>> ax[1].set_xlim([0, 100]) + >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) + >>> ax[1].set_ylim([-90, 90]) + >>> ax[1].grid(True) + >>> plt.show() + + Design and plot peaking comb filter at 250 Hz for a + signal sampled at 1000 Hz, using quality factor Q = 30 + + >>> fs = 1000.0 # Sample frequency (Hz) + >>> f0 = 250.0 # Frequency to be retained (Hz) + >>> Q = 30.0 # Quality factor + >>> # Design peaking filter + >>> b, a = signal.iircomb(f0, Q, ftype='peak', fs=fs, pass_zero=True) + + >>> # Frequency response + >>> freq, h = signal.freqz(b, a, fs=fs) + >>> response = abs(h) + >>> # To avoid divide by zero when graphing + >>> response[response == 0] = 1e-20 + >>> # Plot + >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True) + >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue') + >>> ax[0].set_title("Frequency Response") + >>> ax[0].set_ylabel("Amplitude [dB]", color='blue') + >>> ax[0].set_xlim([0, 500]) + >>> ax[0].set_ylim([-80, 10]) + >>> ax[0].grid(True) + >>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green') + >>> ax[1].set_ylabel("Phase [deg]", color='green') + >>> ax[1].set_xlabel("Frequency [Hz]") + >>> ax[1].set_xlim([0, 500]) + >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) + >>> ax[1].set_ylim([-90, 90]) + >>> ax[1].grid(True) + >>> plt.show() + """ + + # Convert w0, Q, and fs to float + w0 = float(w0) + Q = float(Q) + fs = _validate_fs(fs, allow_none=False) + + # Check for invalid cutoff frequency or filter type + ftype = ftype.lower() + if not 0 < w0 < fs / 2: + raise ValueError(f"w0 must be between 0 and {fs / 2}" + f" (Nyquist), but given {w0}.") + if ftype not in ('notch', 'peak'): + raise ValueError('ftype must be either notch or peak.') + + # Compute the order of the filter + N = round(fs / w0) + + # Check for cutoff frequency divisibility + if abs(w0 - fs/N)/fs > 1e-14: + raise ValueError('fs must be divisible by w0.') + + # Compute frequency in radians and filter bandwidth + # Eq. 11.3.1 (p. 574) from reference [1] + w0 = (2 * np.pi * w0) / fs + w_delta = w0 / Q + + # Define base gain values depending on notch or peak filter + # Compute -3dB attenuation + # Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1] + if ftype == 'notch': + G0, G = 1, 0 + elif ftype == 'peak': + G0, G = 0, 1 + + # Compute beta according to Eq. 11.5.3 (p. 591) from reference [1]. 
Due to + # assuming a -3 dB attenuation value, i.e, assuming GB = 1 / np.sqrt(2), + # the following term simplifies to: + # np.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) = 1 + beta = np.tan(N * w_delta / 4) + + # Compute filter coefficients + # Eq 11.5.1 (p. 590) variables a, b, c from reference [1] + ax = (1 - beta) / (1 + beta) + bx = (G0 + G * beta) / (1 + beta) + cx = (G0 - G * beta) / (1 + beta) + + # Last coefficients are negative to get peaking comb that passes zero or + # notching comb that doesn't. + negative_coef = ((ftype == 'peak' and pass_zero) or + (ftype == 'notch' and not pass_zero)) + + # Compute numerator coefficients + # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1] + # b - cz^-N or b + cz^-N + b = np.zeros(N + 1) + b[0] = bx + if negative_coef: + b[-1] = -cx + else: + b[-1] = +cx + + # Compute denominator coefficients + # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1] + # 1 - az^-N or 1 + az^-N + a = np.zeros(N + 1) + a[0] = 1 + if negative_coef: + a[-1] = -ax + else: + a[-1] = +ax + + return b, a + + +def _hz_to_erb(hz): + """ + Utility for converting from frequency (Hz) to the + Equivalent Rectangular Bandwidth (ERB) scale + ERB = frequency / EarQ + minBW + """ + EarQ = 9.26449 + minBW = 24.7 + return hz / EarQ + minBW + + +def gammatone(freq, ftype, order=None, numtaps=None, fs=None): + """ + Gammatone filter design. + + This function computes the coefficients of an FIR or IIR gammatone + digital filter [1]_. + + Parameters + ---------- + freq : float + Center frequency of the filter (expressed in the same units + as `fs`). + ftype : {'fir', 'iir'} + The type of filter the function generates. If 'fir', the function + will generate an Nth order FIR gammatone filter. If 'iir', the + function will generate an 8th order digital IIR filter, modeled as + as 4th order gammatone filter. + order : int, optional + The order of the filter. Only used when ``ftype='fir'``. + Default is 4 to model the human auditory system. Must be between + 0 and 24. + numtaps : int, optional + Length of the filter. Only used when ``ftype='fir'``. + Default is ``fs*0.015`` if `fs` is greater than 1000, + 15 if `fs` is less than or equal to 1000. + fs : float, optional + The sampling frequency of the signal. `freq` must be between + 0 and ``fs/2``. Default is 2. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials of the filter. + + Raises + ------ + ValueError + If `freq` is less than or equal to 0 or greater than or equal to + ``fs/2``, if `ftype` is not 'fir' or 'iir', if `order` is less than + or equal to 0 or greater than 24 when ``ftype='fir'`` + + See Also + -------- + firwin + iirfilter + + References + ---------- + .. [1] Slaney, Malcolm, "An Efficient Implementation of the + Patterson-Holdsworth Auditory Filter Bank", Apple Computer + Technical Report 35, 1993, pp.3-8, 34-39. 
+ + Examples + -------- + 16-sample 4th order FIR Gammatone filter centered at 440 Hz + + >>> from scipy import signal + >>> signal.gammatone(440, 'fir', numtaps=16, fs=16000) + (array([ 0.00000000e+00, 2.22196719e-07, 1.64942101e-06, 4.99298227e-06, + 1.01993969e-05, 1.63125770e-05, 2.14648940e-05, 2.29947263e-05, + 1.76776931e-05, 2.04980537e-06, -2.72062858e-05, -7.28455299e-05, + -1.36651076e-04, -2.19066855e-04, -3.18905076e-04, -4.33156712e-04]), + [1.0]) + + IIR Gammatone filter centered at 440 Hz + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> fc, fs = 440, 16000 + >>> b, a = signal.gammatone(fc, 'iir', fs=fs) + >>> w, h = signal.freqz(b, a) + >>> plt.plot(w * fs / (2 * np.pi), 20 * np.log10(abs(h))) + >>> plt.xscale('log') + >>> plt.title('Gammatone filter frequency response') + >>> plt.xlabel('Frequency [Hz]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(fc, color='green') # cutoff frequency + >>> plt.show() + """ + # Converts freq to float + freq = float(freq) + + # Set sampling rate if not passed + if fs is None: + fs = 2 + fs = _validate_fs(fs, allow_none=False) + + # Check for invalid cutoff frequency or filter type + ftype = ftype.lower() + filter_types = ['fir', 'iir'] + if not 0 < freq < fs / 2: + raise ValueError(f"The frequency must be between 0 and {fs / 2}" + f" (Nyquist), but given {freq}.") + if ftype not in filter_types: + raise ValueError('ftype must be either fir or iir.') + + # Calculate FIR gammatone filter + if ftype == 'fir': + # Set order and numtaps if not passed + if order is None: + order = 4 + order = operator.index(order) + + if numtaps is None: + numtaps = max(int(fs * 0.015), 15) + numtaps = operator.index(numtaps) + + # Check for invalid order + if not 0 < order <= 24: + raise ValueError("Invalid order: order must be > 0 and <= 24.") + + # Gammatone impulse response settings + t = np.arange(numtaps) / fs + bw = 1.019 * _hz_to_erb(freq) + + # Calculate the FIR gammatone filter + b = (t ** (order - 1)) * np.exp(-2 * np.pi * bw * t) + b *= np.cos(2 * np.pi * freq * t) + + # Scale the FIR filter so the frequency response is 1 at cutoff + scale_factor = 2 * (2 * np.pi * bw) ** (order) + scale_factor /= float_factorial(order - 1) + scale_factor /= fs + b *= scale_factor + a = [1.0] + + # Calculate IIR gammatone filter + elif ftype == 'iir': + # Raise warning if order and/or numtaps is passed + if order is not None: + warnings.warn('order is not used for IIR gammatone filter.', stacklevel=2) + if numtaps is not None: + warnings.warn('numtaps is not used for IIR gammatone filter.', stacklevel=2) + + # Gammatone impulse response settings + T = 1./fs + bw = 2 * np.pi * 1.019 * _hz_to_erb(freq) + fr = 2 * freq * np.pi * T + bwT = bw * T + + # Calculate the gain to normalize the volume at the center frequency + g1 = -2 * np.exp(2j * fr) * T + g2 = 2 * np.exp(-(bwT) + 1j * fr) * T + g3 = np.sqrt(3 + 2 ** (3 / 2)) * np.sin(fr) + g4 = np.sqrt(3 - 2 ** (3 / 2)) * np.sin(fr) + g5 = np.exp(2j * fr) + + g = g1 + g2 * (np.cos(fr) - g4) + g *= (g1 + g2 * (np.cos(fr) + g4)) + g *= (g1 + g2 * (np.cos(fr) - g3)) + g *= (g1 + g2 * (np.cos(fr) + g3)) + g /= ((-2 / np.exp(2 * bwT) - 2 * g5 + 2 * (1 + g5) / np.exp(bwT)) ** 4) + g = np.abs(g) + + # Create empty filter coefficient lists + b = np.empty(5) + a = np.empty(9) + + # Calculate the numerator coefficients + b[0] = (T ** 4) / g + b[1] = -4 * T ** 4 * np.cos(fr) / np.exp(bw * T) / g + b[2] = 6 * T ** 4 * np.cos(2 * fr) / 
np.exp(2 * bw * T) / g + b[3] = -4 * T ** 4 * np.cos(3 * fr) / np.exp(3 * bw * T) / g + b[4] = T ** 4 * np.cos(4 * fr) / np.exp(4 * bw * T) / g + + # Calculate the denominator coefficients + a[0] = 1 + a[1] = -8 * np.cos(fr) / np.exp(bw * T) + a[2] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(2 * bw * T) + a[3] = -8 * (6 * np.cos(fr) + np.cos(3 * fr)) + a[3] /= np.exp(3 * bw * T) + a[4] = 2 * (18 + 16 * np.cos(2 * fr) + np.cos(4 * fr)) + a[4] /= np.exp(4 * bw * T) + a[5] = -8 * (6 * np.cos(fr) + np.cos(3 * fr)) + a[5] /= np.exp(5 * bw * T) + a[6] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(6 * bw * T) + a[7] = -8 * np.cos(fr) / np.exp(7 * bw * T) + a[8] = np.exp(-8 * bw * T) + + return b, a + + +filter_dict = {'butter': [buttap, buttord], + 'butterworth': [buttap, buttord], + + 'cauer': [ellipap, ellipord], + 'elliptic': [ellipap, ellipord], + 'ellip': [ellipap, ellipord], + + 'bessel': [besselap], + 'bessel_phase': [besselap], + 'bessel_delay': [besselap], + 'bessel_mag': [besselap], + + 'cheby1': [cheb1ap, cheb1ord], + 'chebyshev1': [cheb1ap, cheb1ord], + 'chebyshevi': [cheb1ap, cheb1ord], + + 'cheby2': [cheb2ap, cheb2ord], + 'chebyshev2': [cheb2ap, cheb2ord], + 'chebyshevii': [cheb2ap, cheb2ord], + } + +band_dict = {'band': 'bandpass', + 'bandpass': 'bandpass', + 'pass': 'bandpass', + 'bp': 'bandpass', + + 'bs': 'bandstop', + 'bandstop': 'bandstop', + 'bands': 'bandstop', + 'stop': 'bandstop', + + 'l': 'lowpass', + 'low': 'lowpass', + 'lowpass': 'lowpass', + 'lp': 'lowpass', + + 'high': 'highpass', + 'highpass': 'highpass', + 'h': 'highpass', + 'hp': 'highpass', + } + +bessel_norms = {'bessel': 'phase', + 'bessel_phase': 'phase', + 'bessel_delay': 'delay', + 'bessel_mag': 'mag'} diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..b46d1bcc72f5fb03965b66cf131c25c2ca2f5585 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py @@ -0,0 +1,1286 @@ +"""Functions for FIR filter design.""" + +from math import ceil, log +import operator +import warnings +from typing import Literal + +import numpy as np +from numpy.fft import irfft, fft, ifft +from scipy.special import sinc +from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning, + lstsq) +from scipy.signal._arraytools import _validate_fs + +from . import _sigtools + +__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord', + 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase'] + + +# Some notes on function parameters: +# +# `cutoff` and `width` are given as numbers between 0 and 1. These are +# relative frequencies, expressed as a fraction of the Nyquist frequency. +# For example, if the Nyquist frequency is 2 KHz, then width=0.15 is a width +# of 300 Hz. +# +# The `order` of a FIR filter is one less than the number of taps. +# This is a potential source of confusion, so in the following code, +# we will always use the number of taps as the parameterization of +# the 'size' of the filter. The "number of taps" means the number +# of coefficients, which is the same as the length of the impulse +# response of the filter. + + +def kaiser_beta(a): + """Compute the Kaiser parameter `beta`, given the attenuation `a`. + + Parameters + ---------- + a : float + The desired attenuation in the stopband and maximum ripple in + the passband, in dB. This should be a *positive* number. 
+ + Returns + ------- + beta : float + The `beta` parameter to be used in the formula for a Kaiser window. + + References + ---------- + Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476. + + Examples + -------- + Suppose we want to design a lowpass filter, with 65 dB attenuation + in the stop band. The Kaiser window parameter to be used in the + window method is computed by ``kaiser_beta(65)``: + + >>> from scipy.signal import kaiser_beta + >>> kaiser_beta(65) + 6.20426 + + """ + if a > 50: + beta = 0.1102 * (a - 8.7) + elif a > 21: + beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21) + else: + beta = 0.0 + return beta + + +def kaiser_atten(numtaps, width): + """Compute the attenuation of a Kaiser FIR filter. + + Given the number of taps `N` and the transition width `width`, compute the + attenuation `a` in dB, given by Kaiser's formula: + + a = 2.285 * (N - 1) * pi * width + 7.95 + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. + width : float + The desired width of the transition region between passband and + stopband (or, in general, at any discontinuity) for the filter, + expressed as a fraction of the Nyquist frequency. + + Returns + ------- + a : float + The attenuation of the ripple, in dB. + + See Also + -------- + kaiserord, kaiser_beta + + Examples + -------- + Suppose we want to design a FIR filter using the Kaiser window method + that will have 211 taps and a transition width of 9 Hz for a signal that + is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency, + the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB) + is computed as follows: + + >>> from scipy.signal import kaiser_atten + >>> kaiser_atten(211, 0.0375) + 64.48099630593983 + + """ + a = 2.285 * (numtaps - 1) * np.pi * width + 7.95 + return a + + +def kaiserord(ripple, width): + """ + Determine the filter window parameters for the Kaiser window method. + + The parameters returned by this function are generally used to create + a finite impulse response filter using the window method, with either + `firwin` or `firwin2`. + + Parameters + ---------- + ripple : float + Upper bound for the deviation (in dB) of the magnitude of the + filter's frequency response from that of the desired filter (not + including frequencies in any transition intervals). That is, if w + is the frequency expressed as a fraction of the Nyquist frequency, + A(w) is the actual frequency response of the filter and D(w) is the + desired frequency response, the design requirement is that:: + + abs(A(w) - D(w))) < 10**(-ripple/20) + + for 0 <= w <= 1 and w not in a transition interval. + width : float + Width of transition region, normalized so that 1 corresponds to pi + radians / sample. That is, the frequency is expressed as a fraction + of the Nyquist frequency. + + Returns + ------- + numtaps : int + The length of the Kaiser window. + beta : float + The beta parameter for the Kaiser window. + + See Also + -------- + kaiser_beta, kaiser_atten + + Notes + ----- + There are several ways to obtain the Kaiser window: + + - ``signal.windows.kaiser(numtaps, beta, sym=True)`` + - ``signal.get_window(beta, numtaps)`` + - ``signal.get_window(('kaiser', beta), numtaps)`` + + The empirical equations discovered by Kaiser are used. + + References + ---------- + Oppenheim, Schafer, "Discrete-Time Signal Processing", pp.475-476. + + Examples + -------- + We will use the Kaiser window method to design a lowpass FIR filter + for a signal that is sampled at 1000 Hz. 
+ + We want at least 65 dB rejection in the stop band, and in the pass + band the gain should vary no more than 0.5%. + + We want a cutoff frequency of 175 Hz, with a transition between the + pass band and the stop band of 24 Hz. That is, in the band [0, 163], + the gain varies no more than 0.5%, and in the band [187, 500], the + signal is attenuated by at least 65 dB. + + >>> import numpy as np + >>> from scipy.signal import kaiserord, firwin, freqz + >>> import matplotlib.pyplot as plt + >>> fs = 1000.0 + >>> cutoff = 175 + >>> width = 24 + + The Kaiser method accepts just a single parameter to control the pass + band ripple and the stop band rejection, so we use the more restrictive + of the two. In this case, the pass band ripple is 0.005, or 46.02 dB, + so we will use 65 dB as the design parameter. + + Use `kaiserord` to determine the length of the filter and the + parameter for the Kaiser window. + + >>> numtaps, beta = kaiserord(65, width/(0.5*fs)) + >>> numtaps + 167 + >>> beta + 6.20426 + + Use `firwin` to create the FIR filter. + + >>> taps = firwin(numtaps, cutoff, window=('kaiser', beta), + ... scale=False, fs=fs) + + Compute the frequency response of the filter. ``w`` is the array of + frequencies, and ``h`` is the corresponding complex array of frequency + responses. + + >>> w, h = freqz(taps, worN=8000) + >>> w *= 0.5*fs/np.pi # Convert w to Hz. + + Compute the deviation of the magnitude of the filter's response from + that of the ideal lowpass filter. Values in the transition region are + set to ``nan``, so they won't appear in the plot. + + >>> ideal = w < cutoff # The "ideal" frequency response. + >>> deviation = np.abs(np.abs(h) - ideal) + >>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan + + Plot the deviation. A close look at the left end of the stop band shows + that the requirement for 65 dB attenuation is violated in the first lobe + by about 0.125 dB. This is not unusual for the Kaiser window method. + + >>> plt.plot(w, 20*np.log10(np.abs(deviation))) + >>> plt.xlim(0, 0.5*fs) + >>> plt.ylim(-90, -60) + >>> plt.grid(alpha=0.25) + >>> plt.axhline(-65, color='r', ls='--', alpha=0.3) + >>> plt.xlabel('Frequency (Hz)') + >>> plt.ylabel('Deviation from ideal (dB)') + >>> plt.title('Lowpass Filter Frequency Response') + >>> plt.show() + + """ + A = abs(ripple) # in case somebody is confused as to what's meant + if A < 8: + # Formula for N is not valid in this range. + raise ValueError("Requested maximum ripple attenuation " + f"{A:f} is too small for the Kaiser formula.") + beta = kaiser_beta(A) + + # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter + # order, so we have to add 1 to get the number of taps. + numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1 + + return int(ceil(numtaps)), beta + + +def firwin(numtaps, cutoff, *, width=None, window='hamming', pass_zero=True, + scale=True, fs=None): + """ + FIR filter design using the window method. + + This function computes the coefficients of a finite impulse response + filter. The filter will have linear phase; it will be Type I if + `numtaps` is odd and Type II if `numtaps` is even. + + Type II filters always have zero response at the Nyquist frequency, so a + ValueError exception is raised if firwin is called with `numtaps` even and + having a passband whose right end is at the Nyquist frequency. + + Parameters + ---------- + numtaps : int + Length of the filter (number of coefficients, i.e. the filter + order + 1). 
`numtaps` must be odd if a passband includes the + Nyquist frequency. + cutoff : float or 1-D array_like + Cutoff frequency of filter (expressed in the same units as `fs`) + OR an array of cutoff frequencies (that is, band edges). In the + former case, as a float, the cutoff frequency should correspond + with the half-amplitude point, where the attenuation will be -6dB. + In the latter case, the frequencies in `cutoff` should be positive + and monotonically increasing between 0 and `fs/2`. The values 0 + and `fs/2` must not be included in `cutoff`. It should be noted + that this is different than the behavior of `scipy.signal.iirdesign`, + where the cutoff is the half-power point (-3dB). + width : float or None, optional + If `width` is not None, then assume it is the approximate width + of the transition region (expressed in the same units as `fs`) + for use in Kaiser FIR filter design. In this case, the `window` + argument is ignored. + window : string or tuple of string and parameter values, optional + Desired window to use. See `scipy.signal.get_window` for a list + of windows and required parameters. + pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional + If True, the gain at the frequency 0 (i.e., the "DC gain") is 1. + If False, the DC gain is 0. Can also be a string argument for the + desired filter type (equivalent to ``btype`` in IIR design functions). + + .. versionadded:: 1.3.0 + Support for string arguments. + scale : bool, optional + Set to True to scale the coefficients so that the frequency + response is exactly unity at a certain frequency. + That frequency is either: + + - 0 (DC) if the first passband starts at 0 (i.e. pass_zero + is True) + - `fs/2` (the Nyquist frequency) if the first passband ends at + `fs/2` (i.e the filter is a single band highpass filter); + center of first passband otherwise + + fs : float, optional + The sampling frequency of the signal. Each frequency in `cutoff` + must be between 0 and ``fs/2``. Default is 2. + + Returns + ------- + h : (numtaps,) ndarray + Coefficients of length `numtaps` FIR filter. + + Raises + ------ + ValueError + If any value in `cutoff` is less than or equal to 0 or greater + than or equal to ``fs/2``, if the values in `cutoff` are not strictly + monotonically increasing, or if `numtaps` is even but a passband + includes the Nyquist frequency. 
+ + See Also + -------- + firwin2 + firls + minimum_phase + remez + + Examples + -------- + Low-pass from 0 to f: + + >>> from scipy import signal + >>> numtaps = 3 + >>> f = 0.1 + >>> signal.firwin(numtaps, f) + array([ 0.06799017, 0.86401967, 0.06799017]) + + Use a specific window function: + + >>> signal.firwin(numtaps, f, window='nuttall') + array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04]) + + High-pass ('stop' from 0 to f): + + >>> signal.firwin(numtaps, f, pass_zero=False) + array([-0.00859313, 0.98281375, -0.00859313]) + + Band-pass: + + >>> f1, f2 = 0.1, 0.2 + >>> signal.firwin(numtaps, [f1, f2], pass_zero=False) + array([ 0.06301614, 0.88770441, 0.06301614]) + + Band-stop: + + >>> signal.firwin(numtaps, [f1, f2]) + array([-0.00801395, 1.0160279 , -0.00801395]) + + Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]): + + >>> f3, f4 = 0.3, 0.4 + >>> signal.firwin(numtaps, [f1, f2, f3, f4]) + array([-0.01376344, 1.02752689, -0.01376344]) + + Multi-band (passbands are [f1, f2] and [f3,f4]): + + >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False) + array([ 0.04890915, 0.91284326, 0.04890915]) + + """ + # The major enhancements to this function added in November 2010 were + # developed by Tom Krauss (see ticket #902). + fs = _validate_fs(fs, allow_none=True) + fs = 2 if fs is None else fs + + nyq = 0.5 * fs + + cutoff = np.atleast_1d(cutoff) / float(nyq) + + # Check for invalid input. + if cutoff.ndim > 1: + raise ValueError("The cutoff argument must be at most " + "one-dimensional.") + if cutoff.size == 0: + raise ValueError("At least one cutoff frequency must be given.") + if cutoff.min() <= 0 or cutoff.max() >= 1: + raise ValueError("Invalid cutoff frequency: frequencies must be " + "greater than 0 and less than fs/2.") + if np.any(np.diff(cutoff) <= 0): + raise ValueError("Invalid cutoff frequencies: the frequencies " + "must be strictly increasing.") + + if width is not None: + # A width was given. Find the beta parameter of the Kaiser window + # and set `window`. This overrides the value of `window` passed in. + atten = kaiser_atten(numtaps, float(width) / nyq) + beta = kaiser_beta(atten) + window = ('kaiser', beta) + + if isinstance(pass_zero, str): + if pass_zero in ('bandstop', 'lowpass'): + if pass_zero == 'lowpass': + if cutoff.size != 1: + raise ValueError('cutoff must have one element if ' + f'pass_zero=="lowpass", got {cutoff.shape}') + elif cutoff.size <= 1: + raise ValueError('cutoff must have at least two elements if ' + f'pass_zero=="bandstop", got {cutoff.shape}') + pass_zero = True + elif pass_zero in ('bandpass', 'highpass'): + if pass_zero == 'highpass': + if cutoff.size != 1: + raise ValueError('cutoff must have one element if ' + f'pass_zero=="highpass", got {cutoff.shape}') + elif cutoff.size <= 1: + raise ValueError('cutoff must have at least two elements if ' + f'pass_zero=="bandpass", got {cutoff.shape}') + pass_zero = False + else: + raise ValueError('pass_zero must be True, False, "bandpass", ' + '"lowpass", "highpass", or "bandstop", got ' + f'{pass_zero}') + pass_zero = bool(operator.index(pass_zero)) # ensure bool-like + + pass_nyquist = bool(cutoff.size & 1) ^ pass_zero + if pass_nyquist and numtaps % 2 == 0: + raise ValueError("A filter with an even number of coefficients must " + "have zero response at the Nyquist frequency.") + + # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff + # is even, and each pair in cutoff corresponds to passband. 
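+    # For example (values purely illustrative): a low-pass with cutoff=[0.2]
+    # and pass_zero=True becomes [0.0, 0.2]; a high-pass with cutoff=[0.2]
+    # and pass_zero=False becomes [0.2, 1.0]; a band-pass with
+    # cutoff=[0.1, 0.2] and pass_zero=False is left as [0.1, 0.2].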
+ cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist)) + + # `bands` is a 2-D array; each row gives the left and right edges of + # a passband. + bands = cutoff.reshape(-1, 2) + + # Build up the coefficients. + alpha = 0.5 * (numtaps - 1) + m = np.arange(0, numtaps) - alpha + h = 0 + for left, right in bands: + h += right * sinc(right * m) + h -= left * sinc(left * m) + + # Get and apply the window function. + from .windows import get_window + win = get_window(window, numtaps, fftbins=False) + h *= win + + # Now handle scaling if desired. + if scale: + # Get the first passband. + left, right = bands[0] + if left == 0: + scale_frequency = 0.0 + elif right == 1: + scale_frequency = 1.0 + else: + scale_frequency = 0.5 * (left + right) + c = np.cos(np.pi * m * scale_frequency) + s = np.sum(h * c) + h /= s + + return h + + +# Original version of firwin2 from scipy ticket #457, submitted by "tash". +# +# Rewritten by Warren Weckesser, 2010. +def firwin2(numtaps, freq, gain, *, nfreqs=None, window='hamming', + antisymmetric=False, fs=None): + """ + FIR filter design using the window method. + + From the given frequencies `freq` and corresponding gains `gain`, + this function constructs an FIR filter with linear phase and + (approximately) the given frequency response. + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. `numtaps` must be less than + `nfreqs`. + freq : array_like, 1-D + The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being + Nyquist. The Nyquist frequency is half `fs`. + The values in `freq` must be nondecreasing. A value can be repeated + once to implement a discontinuity. The first value in `freq` must + be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must + not be repeated. + gain : array_like + The filter gains at the frequency sampling points. Certain + constraints to gain values, depending on the filter type, are applied, + see Notes for details. + nfreqs : int, optional + The size of the interpolation mesh used to construct the filter. + For most efficient behavior, this should be a power of 2 plus 1 + (e.g, 129, 257, etc). The default is one more than the smallest + power of 2 that is not less than `numtaps`. `nfreqs` must be greater + than `numtaps`. + window : string or (string, float) or float, or None, optional + Window function to use. Default is "hamming". See + `scipy.signal.get_window` for the complete list of possible values. + If None, no window function is applied. + antisymmetric : bool, optional + Whether resulting impulse response is symmetric/antisymmetric. + See Notes for more details. + fs : float, optional + The sampling frequency of the signal. Each frequency in `cutoff` + must be between 0 and ``fs/2``. Default is 2. + + Returns + ------- + taps : ndarray + The filter coefficients of the FIR filter, as a 1-D array of length + `numtaps`. + + See Also + -------- + firls + firwin + minimum_phase + remez + + Notes + ----- + From the given set of frequencies and gains, the desired response is + constructed in the frequency domain. The inverse FFT is applied to the + desired response to create the associated convolution kernel, and the + first `numtaps` coefficients of this kernel, scaled by `window`, are + returned. + + The FIR filter will have linear phase. The type of filter is determined by + the value of 'numtaps` and `antisymmetric` flag. 
+ There are four possible combinations: + + - odd `numtaps`, `antisymmetric` is False, type I filter is produced + - even `numtaps`, `antisymmetric` is False, type II filter is produced + - odd `numtaps`, `antisymmetric` is True, type III filter is produced + - even `numtaps`, `antisymmetric` is True, type IV filter is produced + + Magnitude response of all but type I filters are subjects to following + constraints: + + - type II -- zero at the Nyquist frequency + - type III -- zero at zero and Nyquist frequencies + - type IV -- zero at zero frequency + + .. versionadded:: 0.9.0 + + References + ---------- + .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal + Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989). + (See, for example, Section 7.4.) + + .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital + Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm + + Examples + -------- + A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and + that decreases linearly on [0.5, 1.0] from 1 to 0: + + >>> from scipy import signal + >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) + >>> print(taps[72:78]) + [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961] + + """ + fs = _validate_fs(fs, allow_none=True) + fs = 2 if fs is None else fs + nyq = 0.5 * fs + + if len(freq) != len(gain): + raise ValueError('freq and gain must be of same length.') + + if nfreqs is not None and numtaps >= nfreqs: + raise ValueError(('ntaps must be less than nfreqs, but firwin2 was ' + 'called with ntaps=%d and nfreqs=%s') % + (numtaps, nfreqs)) + + if freq[0] != 0 or freq[-1] != nyq: + raise ValueError('freq must start with 0 and end with fs/2.') + d = np.diff(freq) + if (d < 0).any(): + raise ValueError('The values in freq must be nondecreasing.') + d2 = d[:-1] + d[1:] + if (d2 == 0).any(): + raise ValueError('A value in freq must not occur more than twice.') + if freq[1] == 0: + raise ValueError('Value 0 must not be repeated in freq') + if freq[-2] == nyq: + raise ValueError('Value fs/2 must not be repeated in freq') + + if antisymmetric: + if numtaps % 2 == 0: + ftype = 4 + else: + ftype = 3 + else: + if numtaps % 2 == 0: + ftype = 2 + else: + ftype = 1 + + if ftype == 2 and gain[-1] != 0.0: + raise ValueError("A Type II filter must have zero gain at the " + "Nyquist frequency.") + elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0): + raise ValueError("A Type III filter must have zero gain at zero " + "and Nyquist frequencies.") + elif ftype == 4 and gain[0] != 0.0: + raise ValueError("A Type IV filter must have zero gain at zero " + "frequency.") + + if nfreqs is None: + nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2))) + + if (d == 0).any(): + # Tweak any repeated values in freq so that interp works. + freq = np.array(freq, copy=True) + eps = np.finfo(float).eps * nyq + for k in range(len(freq) - 1): + if freq[k] == freq[k + 1]: + freq[k] = freq[k] - eps + freq[k + 1] = freq[k + 1] + eps + # Check if freq is strictly increasing after tweak + d = np.diff(freq) + if (d <= 0).any(): + raise ValueError("freq cannot contain numbers that are too close " + "(within eps * (fs/2): " + f"{eps}) to a repeated value") + + # Linearly interpolate the desired response on a uniform mesh `x`. + x = np.linspace(0.0, nyq, nfreqs) + fx = np.interp(x, freq, gain) + + # Adjust the phases of the coefficients so that the first `ntaps` of the + # inverse FFT are the desired filter coefficients. + shift = np.exp(-(numtaps - 1) / 2. 
* 1.j * np.pi * x / nyq) + if ftype > 2: + shift *= 1j + + fx2 = fx * shift + + # Use irfft to compute the inverse FFT. + out_full = irfft(fx2) + + if window is not None: + # Create the window to apply to the filter coefficients. + from .windows import get_window + wind = get_window(window, numtaps, fftbins=False) + else: + wind = 1 + + # Keep only the first `numtaps` coefficients in `out`, and multiply by + # the window. + out = out_full[:numtaps] * wind + + if ftype == 3: + out[out.size // 2] = 0.0 + + return out + + +def remez(numtaps, bands, desired, *, weight=None, type='bandpass', + maxiter=25, grid_density=16, fs=None): + """ + Calculate the minimax optimal filter using the Remez exchange algorithm. + + Calculate the filter-coefficients for the finite impulse response + (FIR) filter whose transfer function minimizes the maximum error + between the desired gain and the realized gain in the specified + frequency bands using the Remez exchange algorithm. + + Parameters + ---------- + numtaps : int + The desired number of taps in the filter. The number of taps is + the number of terms in the filter, or the filter order plus one. + bands : array_like + A monotonic sequence containing the band edges. + All elements must be non-negative and less than half the sampling + frequency as given by `fs`. + desired : array_like + A sequence half the size of bands containing the desired gain + in each of the specified bands. + weight : array_like, optional + A relative weighting to give to each band region. The length of + `weight` has to be half the length of `bands`. + type : {'bandpass', 'differentiator', 'hilbert'}, optional + The type of filter: + + * 'bandpass' : flat response in bands. This is the default. + + * 'differentiator' : frequency proportional response in bands. + + * 'hilbert' : filter with odd symmetry, that is, type III + (for even order) or type IV (for odd order) + linear phase filters. + + maxiter : int, optional + Maximum number of iterations of the algorithm. Default is 25. + grid_density : int, optional + Grid density. The dense grid used in `remez` is of size + ``(numtaps + 1) * grid_density``. Default is 16. + fs : float, optional + The sampling frequency of the signal. Default is 1. + + Returns + ------- + out : ndarray + A rank-1 array containing the coefficients of the optimal + (in a minimax sense) filter. + + See Also + -------- + firls + firwin + firwin2 + minimum_phase + + References + ---------- + .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the + design of optimum FIR linear phase digital filters", + IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973. + .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer + Program for Designing Optimum FIR Linear Phase Digital + Filters", IEEE Trans. Audio Electroacoust., vol. AU-21, + pp. 506-525, 1973. + + Examples + -------- + In these examples, `remez` is used to design low-pass, high-pass, + band-pass and band-stop filters. The parameters that define each filter + are the filter order, the band boundaries, the transition widths of the + boundaries, the desired gains in each band, and the sampling frequency. + + We'll use a sample frequency of 22050 Hz in all the examples. In each + example, the desired gain in each band is either 0 (for a stop band) + or 1 (for a pass band). + + `freqz` is used to compute the frequency response of each filter, and + the utility function ``plot_response`` defined below is used to plot + the response. 
+ + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> fs = 22050 # Sample rate, Hz + + >>> def plot_response(w, h, title): + ... "Utility function to plot response functions" + ... fig = plt.figure() + ... ax = fig.add_subplot(111) + ... ax.plot(w, 20*np.log10(np.abs(h))) + ... ax.set_ylim(-40, 5) + ... ax.grid(True) + ... ax.set_xlabel('Frequency (Hz)') + ... ax.set_ylabel('Gain (dB)') + ... ax.set_title(title) + + The first example is a low-pass filter, with cutoff frequency 8 kHz. + The filter length is 325, and the transition width from pass to stop + is 100 Hz. + + >>> cutoff = 8000.0 # Desired cutoff frequency, Hz + >>> trans_width = 100 # Width of transition from pass to stop, Hz + >>> numtaps = 325 # Size of the FIR filter. + >>> taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], + ... [1, 0], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "Low-pass Filter") + >>> plt.show() + + This example shows a high-pass filter: + + >>> cutoff = 2000.0 # Desired cutoff frequency, Hz + >>> trans_width = 250 # Width of transition from pass to stop, Hz + >>> numtaps = 125 # Size of the FIR filter. + >>> taps = signal.remez(numtaps, [0, cutoff - trans_width, cutoff, 0.5*fs], + ... [0, 1], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "High-pass Filter") + >>> plt.show() + + This example shows a band-pass filter with a pass-band from 2 kHz to + 5 kHz. The transition width is 260 Hz and the length of the filter + is 63, which is smaller than in the other examples: + + >>> band = [2000, 5000] # Desired pass band, Hz + >>> trans_width = 260 # Width of transition from pass to stop, Hz + >>> numtaps = 63 # Size of the FIR filter. + >>> edges = [0, band[0] - trans_width, band[0], band[1], + ... band[1] + trans_width, 0.5*fs] + >>> taps = signal.remez(numtaps, edges, [0, 1, 0], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "Band-pass Filter") + >>> plt.show() + + The low order leads to higher ripple and less steep transitions. + + The next example shows a band-stop filter. + + >>> band = [6000, 8000] # Desired stop band, Hz + >>> trans_width = 200 # Width of transition from pass to stop, Hz + >>> numtaps = 175 # Size of the FIR filter. + >>> edges = [0, band[0] - trans_width, band[0], band[1], + ... band[1] + trans_width, 0.5*fs] + >>> taps = signal.remez(numtaps, edges, [1, 0, 1], fs=fs) + >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) + >>> plot_response(w, h, "Band-stop Filter") + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + fs = 1.0 if fs is None else fs + + # Convert type + try: + tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type] + except KeyError as e: + raise ValueError("Type must be 'bandpass', 'differentiator', " + "or 'hilbert'") from e + + # Convert weight + if weight is None: + weight = [1] * len(desired) + + bands = np.asarray(bands).copy() + return _sigtools._remez(numtaps, bands, desired, weight, tnum, fs, + maxiter, grid_density) + + +def firls(numtaps, bands, desired, *, weight=None, fs=None): + """ + FIR filter design using least-squares error minimization. 
+ + Calculate the filter coefficients for the linear-phase finite + impulse response (FIR) filter which has the best approximation + to the desired frequency response described by `bands` and + `desired` in the least squares sense (i.e., the integral of the + weighted mean-squared error within the specified bands is + minimized). + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. `numtaps` must be odd. + bands : array_like + A monotonic nondecreasing sequence containing the band edges in + Hz. All elements must be non-negative and less than or equal to + the Nyquist frequency given by `nyq`. The bands are specified as + frequency pairs, thus, if using a 1D array, its length must be + even, e.g., `np.array([0, 1, 2, 3, 4, 5])`. Alternatively, the + bands can be specified as an nx2 sized 2D array, where n is the + number of bands, e.g, `np.array([[0, 1], [2, 3], [4, 5]])`. + desired : array_like + A sequence the same size as `bands` containing the desired gain + at the start and end point of each band. + weight : array_like, optional + A relative weighting to give to each band region when solving + the least squares problem. `weight` has to be half the size of + `bands`. + fs : float, optional + The sampling frequency of the signal. Each frequency in `bands` + must be between 0 and ``fs/2`` (inclusive). Default is 2. + + Returns + ------- + coeffs : ndarray + Coefficients of the optimal (in a least squares sense) FIR filter. + + See Also + -------- + firwin + firwin2 + minimum_phase + remez + + Notes + ----- + This implementation follows the algorithm given in [1]_. + As noted there, least squares design has multiple advantages: + + 1. Optimal in a least-squares sense. + 2. Simple, non-iterative method. + 3. The general solution can obtained by solving a linear + system of equations. + 4. Allows the use of a frequency dependent weighting function. + + This function constructs a Type I linear phase FIR filter, which + contains an odd number of `coeffs` satisfying for :math:`n < numtaps`: + + .. math:: coeffs(n) = coeffs(numtaps - 1 - n) + + The odd number of coefficients and filter symmetry avoid boundary + conditions that could otherwise occur at the Nyquist and 0 frequencies + (e.g., for Type II, III, or IV variants). + + .. versionadded:: 0.18 + + References + ---------- + .. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares. + OpenStax CNX. Aug 9, 2005. + https://eeweb.engineering.nyu.edu/iselesni/EL713/firls/firls.pdf + + Examples + -------- + We want to construct a band-pass filter. Note that the behavior in the + frequency ranges between our stop bands and pass bands is unspecified, + and thus may overshoot depending on the parameters of our filter: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> fig, axs = plt.subplots(2) + >>> fs = 10.0 # Hz + >>> desired = (0, 0, 1, 1, 0, 0) + >>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))): + ... fir_firls = signal.firls(73, bands, desired, fs=fs) + ... fir_remez = signal.remez(73, bands, desired[::2], fs=fs) + ... fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs) + ... hs = list() + ... ax = axs[bi] + ... for fir in (fir_firls, fir_remez, fir_firwin2): + ... freq, response = signal.freqz(fir) + ... hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0]) + ... for band, gains in zip(zip(bands[::2], bands[1::2]), + ... zip(desired[::2], desired[1::2])): + ... 
ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2) + ... if bi == 0: + ... ax.legend(hs, ('firls', 'remez', 'firwin2'), + ... loc='lower center', frameon=False) + ... else: + ... ax.set_xlabel('Frequency (Hz)') + ... ax.grid(True) + ... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude') + ... + >>> fig.tight_layout() + >>> plt.show() + + """ + fs = _validate_fs(fs, allow_none=True) + fs = 2 if fs is None else fs + nyq = 0.5 * fs + + numtaps = int(numtaps) + if numtaps % 2 == 0 or numtaps < 1: + raise ValueError("numtaps must be odd and >= 1") + M = (numtaps-1) // 2 + + # normalize bands 0->1 and make it 2 columns + nyq = float(nyq) + if nyq <= 0: + raise ValueError(f'nyq must be positive, got {nyq} <= 0.') + bands = np.asarray(bands).flatten() / nyq + if len(bands) % 2 != 0: + raise ValueError("bands must contain frequency pairs.") + if (bands < 0).any() or (bands > 1).any(): + raise ValueError("bands must be between 0 and 1 relative to Nyquist") + bands.shape = (-1, 2) + + # check remaining params + desired = np.asarray(desired).flatten() + if bands.size != desired.size: + raise ValueError( + f"desired must have one entry per frequency, got {desired.size} " + f"gains for {bands.size} frequencies." + ) + desired.shape = (-1, 2) + if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any(): + raise ValueError("bands must be monotonically nondecreasing and have " + "width > 0.") + if (bands[:-1, 1] > bands[1:, 0]).any(): + raise ValueError("bands must not overlap.") + if (desired < 0).any(): + raise ValueError("desired must be non-negative.") + if weight is None: + weight = np.ones(len(desired)) + weight = np.asarray(weight).flatten() + if len(weight) != len(desired): + raise ValueError("weight must be the same size as the number of " + f"band pairs ({len(bands)}).") + if (weight < 0).any(): + raise ValueError("weight must be non-negative.") + + # Set up the linear matrix equation to be solved, Qa = b + + # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n) + # where Q1(k,n)=q(k-n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel. + + # We omit the factor of 0.5 above, instead adding it during coefficient + # calculation. + + # We also omit the 1/π from both Q and b equations, as they cancel + # during solving. + + # We have that: + # q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π) + # Using our normalization ω=πf and with a constant weight W over each + # interval f1->f2 we get: + # q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf + # integrated over each f1->f2 pair (i.e., value at f2 - value at f1). + n = np.arange(numtaps)[:, np.newaxis, np.newaxis] + q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight) + + # Now we assemble our sum of Toeplitz and Hankel + Q1 = toeplitz(q[:M+1]) + Q2 = hankel(q[:M+1], q[M:]) + Q = Q1 + Q2 + + # Now for b(n) we have that: + # b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π) + # Using our normalization ω=πf and with a constant weight W over each + # interval and a linear term for D(ω) we get (over each f1->f2 interval): + # b(n) = W ∫ (mf+c)cos(πnf)df + # = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2 + # integrated over each f1->f2 pair (i.e., value at f2 - value at f1). 
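+    # As a concrete check of the linear term D(f) = m*f + c used below: a band
+    # (f1, f2) = (0.2, 0.4) with desired gains (1.0, 0.5) gives
+    # m = (0.5 - 1.0) / (0.4 - 0.2) = -2.5 and c = 1.0 - 0.2*(-2.5) = 1.5,
+    # so D(0.2) = 1.0 and D(0.4) = 0.5, i.e. D interpolates the band-edge gains.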
+ n = n[:M + 1] # only need this many coefficients here + # Choose m and c such that we are at the start and end weights + m = (np.diff(desired, axis=1) / np.diff(bands, axis=1)) + c = desired[:, [0]] - bands[:, [0]] * m + b = bands * (m*bands + c) * np.sinc(bands * n) + # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0 + b[0] -= m * bands * bands / 2. + b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2 + b = np.dot(np.diff(b, axis=2)[:, :, 0], weight) + + # Now we can solve the equation + try: # try the fast way + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + a = solve(Q, b, assume_a="pos", check_finite=False) + for ww in w: + if (ww.category == LinAlgWarning and + str(ww.message).startswith('Ill-conditioned matrix')): + raise LinAlgError(str(ww.message)) + except LinAlgError: # in case Q is rank deficient + # This is faster than pinvh, even though we don't explicitly use + # the symmetry here. gelsy was faster than gelsd and gelss in + # some non-exhaustive tests. + a = lstsq(Q, b, lapack_driver='gelsy')[0] + + # make coefficients symmetric (linear phase) + coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:])) + return coeffs + + +def _dhtm(mag): + """Compute the modified 1-D discrete Hilbert transform + + Parameters + ---------- + mag : ndarray + The magnitude spectrum. Should be 1-D with an even length, and + preferably a fast length for FFT/IFFT. + """ + # Adapted based on code by Niranjan Damera-Venkata, + # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`) + sig = np.zeros(len(mag)) + # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5 + midpt = len(mag) // 2 + sig[1:midpt] = 1 + sig[midpt+1:] = -1 + # eventually if we want to support complex filters, we will need a + # np.abs() on the mag inside the log, and should remove the .real + recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real + return recon + + +def minimum_phase(h: np.ndarray, + method: Literal['homomorphic', 'hilbert'] = 'homomorphic', + n_fft: int | None = None, *, half: bool = True) -> np.ndarray: + """Convert a linear-phase FIR filter to minimum phase + + Parameters + ---------- + h : array + Linear-phase FIR filter coefficients. + method : {'hilbert', 'homomorphic'} + The provided methods are: + + 'homomorphic' (default) + This method [4]_ [5]_ works best with filters with an + odd number of taps, and the resulting minimum phase filter + will have a magnitude response that approximates the square + root of the original filter's magnitude response using half + the number of taps when ``half=True`` (default), or the + original magnitude spectrum using the same number of taps + when ``half=False``. + + 'hilbert' + This method [1]_ is designed to be used with equiripple + filters (e.g., from `remez`) with unity or zero gain + regions. + + n_fft : int + The number of points to use for the FFT. Should be at least a + few times larger than the signal length (see Notes). + half : bool + If ``True``, create a filter that is half the length of the original, with a + magnitude spectrum that is the square root of the original. If ``False``, + create a filter that is the same length as the original, with a magnitude + spectrum that is designed to match the original (only supported when + ``method='homomorphic'``). + + .. versionadded:: 1.14.0 + + Returns + ------- + h_minimum : array + The minimum-phase version of the filter, with length + ``(len(h) + 1) // 2`` when ``half is True`` or ``len(h)`` otherwise. 
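+        For instance, converting a 151-tap linear-phase filter with the default
+        ``half=True`` yields ``(151 + 1) // 2 == 76`` taps, while ``half=False``
+        keeps all 151 taps.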
+ + See Also + -------- + firwin + firwin2 + remez + + Notes + ----- + Both the Hilbert [1]_ or homomorphic [4]_ [5]_ methods require selection + of an FFT length to estimate the complex cepstrum of the filter. + + In the case of the Hilbert method, the deviation from the ideal + spectrum ``epsilon`` is related to the number of stopband zeros + ``n_stop`` and FFT length ``n_fft`` as:: + + epsilon = 2. * n_stop / n_fft + + For example, with 100 stopband zeros and a FFT length of 2048, + ``epsilon = 0.0976``. If we conservatively assume that the number of + stopband zeros is one less than the filter length, we can take the FFT + length to be the next power of 2 that satisfies ``epsilon=0.01`` as:: + + n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01))) + + This gives reasonable results for both the Hilbert and homomorphic + methods, and gives the value used when ``n_fft=None``. + + Alternative implementations exist for creating minimum-phase filters, + including zero inversion [2]_ and spectral factorization [3]_ [4]_. + For more information, see `this DSPGuru page + `__. + + References + ---------- + .. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and + complex minimum phase digital FIR filters," Acoustics, Speech, + and Signal Processing, 1999. Proceedings., 1999 IEEE International + Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3. + :doi:`10.1109/ICASSP.1999.756179` + .. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR + filters by direct factorization," Signal Processing, + vol. 10, no. 4, pp. 369-383, Jun. 1986. + .. [3] T. Saramaki, "Finite Impulse Response Filter Design," in + Handbook for Digital Signal Processing, chapter 4, + New York: Wiley-Interscience, 1993. + .. [4] J. S. Lim, Advanced Topics in Signal Processing. + Englewood Cliffs, N.J.: Prentice Hall, 1988. + .. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck, + "Discrete-Time Signal Processing," 3rd edition. + Upper Saddle River, N.J.: Pearson, 2009. + + Examples + -------- + Create an optimal linear-phase low-pass filter `h` with a transition band of + [0.2, 0.3] (assuming a Nyquist frequency of 1): + + >>> import numpy as np + >>> from scipy.signal import remez, minimum_phase, freqz, group_delay + >>> import matplotlib.pyplot as plt + >>> freq = [0, 0.2, 0.3, 1.0] + >>> desired = [1, 0] + >>> h_linear = remez(151, freq, desired, fs=2) + + Convert it to minimum phase: + + >>> h_hil = minimum_phase(h_linear, method='hilbert') + >>> h_hom = minimum_phase(h_linear, method='homomorphic') + >>> h_hom_full = minimum_phase(h_linear, method='homomorphic', half=False) + + Compare the impulse and frequency response of the four filters: + + >>> fig0, ax0 = plt.subplots(figsize=(6, 3), tight_layout=True) + >>> fig1, axs = plt.subplots(3, sharex='all', figsize=(6, 6), tight_layout=True) + >>> ax0.set_title("Impulse response") + >>> ax0.set(xlabel='Samples', ylabel='Amplitude', xlim=(0, len(h_linear) - 1)) + >>> axs[0].set_title("Frequency Response") + >>> axs[0].set(xlim=(0, .65), ylabel="Magnitude / dB") + >>> axs[1].set(ylabel="Phase / rad") + >>> axs[2].set(ylabel="Group Delay / samples", ylim=(-31, 81), + ... xlabel='Normalized Frequency (Nyqist frequency: 1)') + >>> for h, lb in ((h_linear, f'Linear ({len(h_linear)})'), + ... (h_hil, f'Min-Hilbert ({len(h_hil)})'), + ... (h_hom, f'Min-Homomorphic ({len(h_hom)})'), + ... (h_hom_full, f'Min-Homom. Full ({len(h_hom_full)})')): + ... w_H, H = freqz(h, fs=2) + ... w_gd, gd = group_delay((h, 1), fs=2) + ... + ... 
alpha = 1.0 if lb == 'linear' else 0.5 # full opacity for 'linear' line + ... ax0.plot(h, '.-', alpha=alpha, label=lb) + ... axs[0].plot(w_H, 20 * np.log10(np.abs(H)), alpha=alpha) + ... axs[1].plot(w_H, np.unwrap(np.angle(H)), alpha=alpha, label=lb) + ... axs[2].plot(w_gd, gd, alpha=alpha) + >>> ax0.grid(True) + >>> ax0.legend(title='Filter Phase (Order)') + >>> axs[1].legend(title='Filter Phase (Order)', loc='lower right') + >>> for ax_ in axs: # shade transition band: + ... ax_.axvspan(freq[1], freq[2], color='y', alpha=.25) + ... ax_.grid(True) + >>> plt.show() + + The impulse response and group delay plot depict the 75 sample delay of the linear + phase filter `h`. The phase should also be linear in the stop band--due to the small + magnitude, numeric noise dominates there. Furthermore, the plots show that the + minimum phase filters clearly show a reduced (negative) phase slope in the pass and + transition band. The plots also illustrate that the filter with parameters + ``method='homomorphic', half=False`` has same order and magnitude response as the + linear filter `h` whereas the other minimum phase filters have only half the order + and the square root of the magnitude response. + """ + h = np.asarray(h) + if np.iscomplexobj(h): + raise ValueError('Complex filters not supported') + if h.ndim != 1 or h.size <= 2: + raise ValueError('h must be 1-D and at least 2 samples long') + n_half = len(h) // 2 + if not np.allclose(h[-n_half:][::-1], h[:n_half]): + warnings.warn('h does not appear to by symmetric, conversion may fail', + RuntimeWarning, stacklevel=2) + if not isinstance(method, str) or method not in \ + ('homomorphic', 'hilbert',): + raise ValueError(f'method must be "homomorphic" or "hilbert", got {method!r}') + if method == "hilbert" and not half: + raise ValueError("`half=False` is only supported when `method='homomorphic'`") + if n_fft is None: + n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01))) + n_fft = int(n_fft) + if n_fft < len(h): + raise ValueError(f'n_fft must be at least len(h)=={len(h)}') + if method == 'hilbert': + w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half) + H = np.real(fft(h, n_fft) * np.exp(1j * w)) + dp = max(H) - 1 + ds = 0 - min(H) + S = 4. 
/ (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2 + H += ds + H *= S + H = np.sqrt(H, out=H) + H += 1e-10 # ensure that the log does not explode + h_minimum = _dhtm(H) + else: # method == 'homomorphic' + # zero-pad; calculate the DFT + h_temp = np.abs(fft(h, n_fft)) + # take 0.25*log(|H|**2) = 0.5*log(|H|) + h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up + np.log(h_temp, out=h_temp) + if half: # halving of magnitude spectrum optional + h_temp *= 0.5 + # IDFT + h_temp = ifft(h_temp).real + # multiply pointwise by the homomorphic filter + # lmin[n] = 2u[n] - d[n] + # i.e., double the positive frequencies and zero out the negative ones; + # Oppenheim+Shafer 3rd ed p991 eq13.42b and p1004 fig13.7 + win = np.zeros(n_fft) + win[0] = 1 + stop = n_fft // 2 + win[1:stop] = 2 + if n_fft % 2: + win[stop] = 1 + h_temp *= win + h_temp = ifft(np.exp(fft(h_temp))) + h_minimum = h_temp.real + n_out = (n_half + len(h) % 2) if half else len(h) + return h_minimum[:n_out] diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..52c6efbbfa53288934d12918566016db6c742ef1 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py @@ -0,0 +1,533 @@ +""" +ltisys -- a collection of functions to convert linear time invariant systems +from one representation to another. +""" + +import numpy as np +from numpy import (r_, eye, atleast_2d, poly, dot, + asarray, zeros, array, outer) +from scipy import linalg + +from ._filter_design import tf2zpk, zpk2tf, normalize + + +__all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk', + 'cont2discrete'] + + +def tf2ss(num, den): + r"""Transfer function to state-space representation. + + Parameters + ---------- + num, den : array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree. The + denominator needs to be at least as long as the numerator. + + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + Examples + -------- + Convert the transfer function: + + .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + to the state-space representation: + + .. math:: + + \dot{\textbf{x}}(t) = + \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ + + \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) + + >>> from scipy.signal import tf2ss + >>> A, B, C, D = tf2ss(num, den) + >>> A + array([[-2., -1.], + [ 1., 0.]]) + >>> B + array([[ 1.], + [ 0.]]) + >>> C + array([[ 1., 2.]]) + >>> D + array([[ 1.]]) + """ + # Controller canonical state-space representation. + # if M+1 = len(num) and K+1 = len(den) then we must have M <= K + # states are found by asserting that X(s) = U(s) / D(s) + # then Y(s) = N(s) * X(s) + # + # A, B, C, and D follow quite naturally. + # + num, den = normalize(num, den) # Strips zeros, checks arrays + nn = len(num.shape) + if nn == 1: + num = asarray([num], num.dtype) + M = num.shape[1] + K = len(den) + if M > K: + msg = "Improper transfer function. `num` is longer than `den`." 
+ raise ValueError(msg) + if M == 0 or K == 0: # Null system + return (array([], float), array([], float), array([], float), + array([], float)) + + # pad numerator to have same number of columns has denominator + num = np.hstack((np.zeros((num.shape[0], K - M), dtype=num.dtype), num)) + + if num.shape[-1] > 0: + D = atleast_2d(num[:, 0]) + + else: + # We don't assign it an empty array because this system + # is not 'null'. It just doesn't have a non-zero D + # matrix. Thus, it should have a non-zero shape so that + # it can be operated on by functions like 'ss2tf' + D = array([[0]], float) + + if K == 1: + D = D.reshape(num.shape) + + return (zeros((1, 1)), zeros((1, D.shape[1])), + zeros((D.shape[0], 1)), D) + + frow = -array([den[1:]]) + A = r_[frow, eye(K - 2, K - 1)] + B = eye(K - 1, 1) + C = num[:, 1:] - outer(num[:, 0], den[1:]) + D = D.reshape((C.shape[0], B.shape[1])) + + return A, B, C, D + + +def _none_to_empty_2d(arg): + if arg is None: + return zeros((0, 0)) + else: + return arg + + +def _atleast_2d_or_none(arg): + if arg is not None: + return atleast_2d(arg) + + +def _shape_or_none(M): + if M is not None: + return M.shape + else: + return (None,) * 2 + + +def _choice_not_none(*args): + for arg in args: + if arg is not None: + return arg + + +def _restore(M, shape): + if M.shape == (0, 0): + return zeros(shape) + else: + if M.shape != shape: + raise ValueError("The input arrays have incompatible shapes.") + return M + + +def abcd_normalize(A=None, B=None, C=None, D=None): + """Check state-space matrices and ensure they are 2-D. + + If enough information on the system is provided, that is, enough + properly-shaped arrays are passed to the function, the missing ones + are built from this information, ensuring the correct number of + rows and columns. Otherwise a ValueError is raised. + + Parameters + ---------- + A, B, C, D : array_like, optional + State-space matrices. All of them are None (missing) by default. + See `ss2tf` for format. + + Returns + ------- + A, B, C, D : array + Properly shaped state-space matrices. + + Raises + ------ + ValueError + If not enough information on the system was provided. + + """ + A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D)) + + MA, NA = _shape_or_none(A) + MB, NB = _shape_or_none(B) + MC, NC = _shape_or_none(C) + MD, ND = _shape_or_none(D) + + p = _choice_not_none(MA, MB, NC) + q = _choice_not_none(NB, ND) + r = _choice_not_none(MC, MD) + if p is None or q is None or r is None: + raise ValueError("Not enough information on the system.") + + A, B, C, D = map(_none_to_empty_2d, (A, B, C, D)) + A = _restore(A, (p, p)) + B = _restore(B, (p, q)) + C = _restore(C, (r, p)) + D = _restore(D, (r, q)) + + return A, B, C, D + + +def ss2tf(A, B, C, D, input=0): + r"""State-space to transfer function. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + num : 2-D ndarray + Numerator(s) of the resulting transfer function(s). `num` has one row + for each of the system's outputs. Each row is a sequence representation + of the numerator polynomial. + den : 1-D ndarray + Denominator of the resulting transfer function(s). 
`den` is a sequence + representation of the denominator polynomial. + + Examples + -------- + Convert the state-space representation: + + .. math:: + + \dot{\textbf{x}}(t) = + \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ + + \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) + + >>> A = [[-2, -1], [1, 0]] + >>> B = [[1], [0]] # 2-D column vector + >>> C = [[1, 2]] # 2-D row vector + >>> D = 1 + + to the transfer function: + + .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> from scipy.signal import ss2tf + >>> ss2tf(A, B, C, D) + (array([[1., 3., 3.]]), array([ 1., 2., 1.])) + """ + # transfer function is C (sI - A)**(-1) B + D + + # Check consistency and make them all rank-2 arrays + A, B, C, D = abcd_normalize(A, B, C, D) + + nout, nin = D.shape + if input >= nin: + raise ValueError("System does not have the input specified.") + + # make SIMO from possibly MIMO system. + B = B[:, input:input + 1] + D = D[:, input:input + 1] + + try: + den = poly(A) + except ValueError: + den = 1 + + if (B.size == 0) and (C.size == 0): + num = np.ravel(D) + if (D.size == 0) and (A.size == 0): + den = [] + return num, den + + num_states = A.shape[0] + type_test = A[:, 0] + B[:, 0] + C[0, :] + D + 0.0 + num = np.empty((nout, num_states + 1), type_test.dtype) + for k in range(nout): + Ck = atleast_2d(C[k, :]) + num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den + + return num, den + + +def zpk2ss(z, p, k): + """Zero-pole-gain representation to state-space representation + + Parameters + ---------- + z, p : sequence + Zeros and poles. + k : float + System gain. + + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + """ + return tf2ss(*zpk2tf(z, p, k)) + + +def ss2zpk(A, B, C, D, input=0): + """State-space representation to zero-pole-gain representation. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + z, p : sequence + Zeros and poles. + k : float + System gain. + + """ + return tf2zpk(*ss2tf(A, B, C, D, input=input)) + + +def cont2discrete(system, dt, method="zoh", alpha=None): + """ + Transform a continuous to a discrete state-space system. + + Parameters + ---------- + system : a tuple describing the system or an instance of `lti` + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + dt : float + The discretization time step. 
+ method : str, optional + Which method to use: + + * gbt: generalized bilinear transformation + * bilinear: Tustin's approximation ("gbt" with alpha=0.5) + * euler: Euler (or forward differencing) method ("gbt" with alpha=0) + * backward_diff: Backwards differencing ("gbt" with alpha=1.0) + * zoh: zero-order hold (default) + * foh: first-order hold (*versionadded: 1.3.0*) + * impulse: equivalent impulse response (*versionadded: 1.3.0*) + + alpha : float within [0, 1], optional + The generalized bilinear transformation weighting parameter, which + should only be specified with method="gbt", and is ignored otherwise + + Returns + ------- + sysd : tuple containing the discrete system + Based on the input type, the output will be of the form + + * (num, den, dt) for transfer function input + * (zeros, poles, gain, dt) for zeros-poles-gain input + * (A, B, C, D, dt) for state-space system input + + Notes + ----- + By default, the routine uses a Zero-Order Hold (zoh) method to perform + the transformation. Alternatively, a generalized bilinear transformation + may be used, which includes the common Tustin's bilinear approximation, + an Euler's method technique, or a backwards differencing technique. + + The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear + approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method + is based on [4]_. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models + + .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf + + .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized + bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754, + 2009. + (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf) + + .. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control + of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley, + pp. 204-206, 1998. + + Examples + -------- + We can transform a continuous state-space system to a discrete one: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import cont2discrete, lti, dlti, dstep + + Define a continuous state-space system. + + >>> A = np.array([[0, 1],[-10., -3]]) + >>> B = np.array([[0],[10.]]) + >>> C = np.array([[1., 0]]) + >>> D = np.array([[0.]]) + >>> l_system = lti(A, B, C, D) + >>> t, x = l_system.step(T=np.linspace(0, 5, 100)) + >>> fig, ax = plt.subplots() + >>> ax.plot(t, x, label='Continuous', linewidth=3) + + Transform it to a discrete state-space system using several methods. + + >>> dt = 0.1 + >>> for method in ['zoh', 'bilinear', 'euler', 'backward_diff', 'foh', 'impulse']: + ... d_system = cont2discrete((A, B, C, D), dt, method=method) + ... s, x_d = dstep(d_system) + ... 
ax.step(s, np.squeeze(x_d), label=method, where='post') + >>> ax.axis([t[0], t[-1], x[0], 1.4]) + >>> ax.legend(loc='best') + >>> fig.tight_layout() + >>> plt.show() + + """ + if len(system) == 1: + return system.to_discrete() + if len(system) == 2: + sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method, + alpha=alpha) + return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 3: + sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt, + method=method, alpha=alpha) + return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 4: + a, b, c, d = system + else: + raise ValueError("First argument must either be a tuple of 2 (tf), " + "3 (zpk), or 4 (ss) arrays.") + + if method == 'gbt': + if alpha is None: + raise ValueError("Alpha parameter must be specified for the " + "generalized bilinear transform (gbt) method") + elif alpha < 0 or alpha > 1: + raise ValueError("Alpha parameter must be within the interval " + "[0,1] for the gbt method") + + if method == 'gbt': + # This parameter is used repeatedly - compute once here + ima = np.eye(a.shape[0]) - alpha*dt*a + ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a) + bd = linalg.solve(ima, dt*b) + + # Similarly solve for the output equation matrices + cd = linalg.solve(ima.transpose(), c.transpose()) + cd = cd.transpose() + dd = d + alpha*np.dot(c, bd) + + elif method == 'bilinear' or method == 'tustin': + return cont2discrete(system, dt, method="gbt", alpha=0.5) + + elif method == 'euler' or method == 'forward_diff': + return cont2discrete(system, dt, method="gbt", alpha=0.0) + + elif method == 'backward_diff': + return cont2discrete(system, dt, method="gbt", alpha=1.0) + + elif method == 'zoh': + # Build an exponential matrix + em_upper = np.hstack((a, b)) + + # Need to stack zeros under the a and b matrices + em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])), + np.zeros((b.shape[1], b.shape[1])))) + + em = np.vstack((em_upper, em_lower)) + ms = linalg.expm(dt * em) + + # Dispose of the lower rows + ms = ms[:a.shape[0], :] + + ad = ms[:, 0:a.shape[1]] + bd = ms[:, a.shape[1]:] + + cd = c + dd = d + + elif method == 'foh': + # Size parameters for convenience + n = a.shape[0] + m = b.shape[1] + + # Build an exponential matrix similar to 'zoh' method + em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m)) + em_lower = zeros((m, n + 2 * m)) + em = np.block([[em_upper], [em_lower]]) + + ms = linalg.expm(em) + + # Get the three blocks from upper rows + ms11 = ms[:n, 0:n] + ms12 = ms[:n, n:n + m] + ms13 = ms[:n, n + m:] + + ad = ms11 + bd = ms12 - ms13 + ms11 @ ms13 + cd = c + dd = d + c @ ms13 + + elif method == 'impulse': + if not np.allclose(d, 0): + raise ValueError("Impulse method is only applicable " + "to strictly proper systems") + + ad = linalg.expm(a * dt) + bd = ad @ b * dt + cd = c + dd = c @ b * dt + + else: + raise ValueError(f"Unknown transformation method '{method}'") + + return ad, bd, cd, dd, dt diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_ltisys.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_ltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..3992797a09a3ceee4be1f052603fcd6593ae6274 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_ltisys.py @@ -0,0 +1,3519 @@ +""" +ltisys -- a collection of classes and functions for modeling linear +time invariant systems. 
+""" +# +# Author: Travis Oliphant 2001 +# +# Feb 2010: Warren Weckesser +# Rewrote lsim2 and added impulse2. +# Apr 2011: Jeffrey Armstrong +# Added dlsim, dstep, dimpulse, cont2discrete +# Aug 2013: Juan Luis Cano +# Rewrote abcd_normalize. +# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr +# Added pole placement +# Mar 2015: Clancy Rowley +# Rewrote lsim +# May 2015: Felix Berkenkamp +# Split lti class into subclasses +# Merged discrete systems and added dlti + +import warnings + +# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7 +# use scipy's qr until this is solved + +from scipy.linalg import qr as s_qr +from scipy import linalg +from scipy.interpolate import make_interp_spline +from ._filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk, + freqz_zpk) +from ._lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk, + cont2discrete, _atleast_2d_or_none) + +import numpy as np +from numpy import (real, atleast_1d, squeeze, asarray, zeros, + dot, transpose, ones, linspace) +import copy + +__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace', + 'lsim', 'impulse', 'step', 'bode', + 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse', + 'dfreqresp', 'dbode'] + + +class LinearTimeInvariant: + def __new__(cls, *system, **kwargs): + """Create a new object, don't allow direct instances.""" + if cls is LinearTimeInvariant: + raise NotImplementedError('The LinearTimeInvariant class is not ' + 'meant to be used directly, use `lti` ' + 'or `dlti` instead.') + return super().__new__(cls) + + def __init__(self): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. + """ + super().__init__() + + self.inputs = None + self.outputs = None + self._dt = None + + @property + def dt(self): + """Return the sampling time of the system, `None` for `lti` systems.""" + return self._dt + + @property + def _dt_dict(self): + if self.dt is None: + return {} + else: + return {'dt': self.dt} + + @property + def zeros(self): + """Zeros of the system.""" + return self.to_zpk().zeros + + @property + def poles(self): + """Poles of the system.""" + return self.to_zpk().poles + + def _as_ss(self): + """Convert to `StateSpace` system, without copying. + + Returns + ------- + sys: StateSpace + The `StateSpace` system. If the class is already an instance of + `StateSpace` then this instance is returned. + """ + if isinstance(self, StateSpace): + return self + else: + return self.to_ss() + + def _as_zpk(self): + """Convert to `ZerosPolesGain` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `ZerosPolesGain` system. If the class is already an instance of + `ZerosPolesGain` then this instance is returned. + """ + if isinstance(self, ZerosPolesGain): + return self + else: + return self.to_zpk() + + def _as_tf(self): + """Convert to `TransferFunction` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `TransferFunction` system. If the class is already an instance of + `TransferFunction` then this instance is returned. + """ + if isinstance(self, TransferFunction): + return self + else: + return self.to_tf() + + +class lti(LinearTimeInvariant): + r""" + Continuous-time linear time invariant system base class. + + Parameters + ---------- + *system : arguments + The `lti` class can be instantiated with either 2, 3 or 4 arguments. 
+ The following gives the number of arguments and the corresponding + continuous-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. + + See Also + -------- + ZerosPolesGain, StateSpace, TransferFunction, dlti + + Notes + ----- + `lti` instances do not exist directly. Instead, `lti` creates an instance + of one of its subclasses: `StateSpace`, `TransferFunction` or + `ZerosPolesGain`. + + If (numerator, denominator) is passed in for ``*system``, coefficients for + both the numerator and denominator should be specified in descending + exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3, + 5]``). + + Changing the value of properties that are not directly part of the current + system representation (such as the `zeros` of a `StateSpace` system) is + very inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> from scipy import signal + + >>> signal.lti(1, 2, 3, 4) + StateSpaceContinuous( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: None + ) + + Construct the transfer function + :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> signal.lti([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + Construct the transfer function :math:`H(s) = \frac{3s + 4}{1s + 2}`: + + >>> signal.lti([3, 4], [1, 2]) + TransferFunctionContinuous( + array([3., 4.]), + array([1., 2.]), + dt: None + ) + + """ + def __new__(cls, *system): + """Create an instance of the appropriate subclass.""" + if cls is lti: + N = len(system) + if N == 2: + return TransferFunctionContinuous.__new__( + TransferFunctionContinuous, *system) + elif N == 3: + return ZerosPolesGainContinuous.__new__( + ZerosPolesGainContinuous, *system) + elif N == 4: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system) + else: + raise ValueError("`system` needs to be an instance of `lti` " + "or have 2, 3 or 4 arguments.") + # __new__ was called from a subclass, let it call its own functions + return super().__new__(cls) + + def __init__(self, *system): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. + """ + super().__init__(*system) + + def impulse(self, X0=None, T=None, N=None): + """ + Return the impulse response of a continuous-time system. + See `impulse` for details. + """ + return impulse(self, X0=X0, T=T, N=N) + + def step(self, X0=None, T=None, N=None): + """ + Return the step response of a continuous-time system. + See `step` for details. + """ + return step(self, X0=X0, T=T, N=N) + + def output(self, U, T, X0=None): + """ + Return the response of a continuous-time system to input `U`. + See `lsim` for details. + """ + return lsim(self, U, T, X0=X0) + + def bode(self, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a continuous-time system. + + Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude + [dB] and phase [deg]. See `bode` for details. 
+ + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sys = signal.TransferFunction([1], [1, 1]) + >>> w, mag, phase = sys.bode() + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + return bode(self, w=w, n=n) + + def freqresp(self, w=None, n=10000): + """ + Calculate the frequency response of a continuous-time system. + + Returns a 2-tuple containing arrays of frequencies [rad/s] and + complex magnitude. + See `freqresp` for details. + """ + return freqresp(self, w=w, n=n) + + def to_discrete(self, dt, method='zoh', alpha=None): + """Return a discretized version of the current system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` + """ + raise NotImplementedError('to_discrete is not implemented for this ' + 'system class.') + + +class dlti(LinearTimeInvariant): + r""" + Discrete-time linear time invariant system base class. + + Parameters + ---------- + *system: arguments + The `dlti` class can be instantiated with either 2, 3 or 4 arguments. + The following gives the number of arguments and the corresponding + discrete-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to ``True`` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + ZerosPolesGain, StateSpace, TransferFunction, lti + + Notes + ----- + `dlti` instances do not exist directly. Instead, `dlti` creates an instance + of one of its subclasses: `StateSpace`, `TransferFunction` or + `ZerosPolesGain`. + + Changing the value of properties that are not directly part of the current + system representation (such as the `zeros` of a `StateSpace` system) is + very inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + If (numerator, denominator) is passed in for ``*system``, coefficients for + both the numerator and denominator should be specified in descending + exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3, + 5]``). + + .. 
versionadded:: 0.18.0 + + Examples + -------- + >>> from scipy import signal + + >>> signal.dlti(1, 2, 3, 4) + StateSpaceDiscrete( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: True + ) + + >>> signal.dlti(1, 2, 3, 4, dt=0.1) + StateSpaceDiscrete( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: 0.1 + ) + + Construct the transfer function + :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time + of 0.1 seconds: + + >>> signal.dlti([1, 2], [3, 4], 5, dt=0.1) + ZerosPolesGainDiscrete( + array([1, 2]), + array([3, 4]), + 5, + dt: 0.1 + ) + + Construct the transfer function :math:`H(z) = \frac{3z + 4}{1z + 2}` with + a sampling time of 0.1 seconds: + + >>> signal.dlti([3, 4], [1, 2], dt=0.1) + TransferFunctionDiscrete( + array([3., 4.]), + array([1., 2.]), + dt: 0.1 + ) + + """ + def __new__(cls, *system, **kwargs): + """Create an instance of the appropriate subclass.""" + if cls is dlti: + N = len(system) + if N == 2: + return TransferFunctionDiscrete.__new__( + TransferFunctionDiscrete, *system, **kwargs) + elif N == 3: + return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete, + *system, **kwargs) + elif N == 4: + return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, + **kwargs) + else: + raise ValueError("`system` needs to be an instance of `dlti` " + "or have 2, 3 or 4 arguments.") + # __new__ was called from a subclass, let it call its own functions + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. + """ + dt = kwargs.pop('dt', True) + super().__init__(*system, **kwargs) + + self.dt = dt + + @property + def dt(self): + """Return the sampling time of the system.""" + return self._dt + + @dt.setter + def dt(self, dt): + self._dt = dt + + def impulse(self, x0=None, t=None, n=None): + """ + Return the impulse response of the discrete-time `dlti` system. + See `dimpulse` for details. + """ + return dimpulse(self, x0=x0, t=t, n=n) + + def step(self, x0=None, t=None, n=None): + """ + Return the step response of the discrete-time `dlti` system. + See `dstep` for details. + """ + return dstep(self, x0=x0, t=t, n=n) + + def output(self, u, t, x0=None): + """ + Return the response of the discrete-time system to input `u`. + See `dlsim` for details. + """ + return dlsim(self, u, t, x0=x0) + + def bode(self, w=None, n=100): + r""" + Calculate Bode magnitude and phase data of a discrete-time system. + + Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude + [dB] and phase [deg]. See `dbode` for details. + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}` + with sampling time 0.5s: + + >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5) + + Equivalent: signal.dbode(sys) + + >>> w, mag, phase = sys.bode() + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + return dbode(self, w=w, n=n) + + def freqresp(self, w=None, n=10000, whole=False): + """ + Calculate the frequency response of a discrete-time system. + + Returns a 2-tuple containing arrays of frequencies [rad/s] and + complex magnitude. + See `dfreqresp` for details. 
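+
+        Examples
+        --------
+        A minimal sketch using an arbitrary first-order discrete system:
+
+        >>> from scipy import signal
+        >>> sys = signal.dlti([1], [1, -0.5], dt=0.1)
+        >>> w, H = sys.freqresp()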
+ + """ + return dfreqresp(self, w=w, n=n, whole=whole) + + +class TransferFunction(LinearTimeInvariant): + r"""Linear Time Invariant system class in transfer function form. + + Represents the system as the continuous-time transfer function + :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the + discrete-time transfer function + :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + `TransferFunction` systems inherit additional + functionality from the `lti`, respectively the `dlti` classes, depending on + which system representation is used. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + See Also + -------- + ZerosPolesGain, StateSpace, lti, dlti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g. 
``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be + represented as ``[1, 3, 5]``) + + Examples + -------- + Construct the transfer function + :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`: + + >>> from scipy import signal + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + >>> signal.TransferFunction(num, den) + TransferFunctionContinuous( + array([1., 3., 3.]), + array([1., 2., 1.]), + dt: None + ) + + Construct the transfer function + :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of + 0.1 seconds: + + >>> signal.TransferFunction(num, den, dt=0.1) + TransferFunctionDiscrete( + array([1., 3., 3.]), + array([1., 2., 1.]), + dt: 0.1 + ) + + """ + def __new__(cls, *system, **kwargs): + """Handle object conversion if input is an instance of lti.""" + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_tf() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is TransferFunction: + if kwargs.get('dt') is None: + return TransferFunctionContinuous.__new__( + TransferFunctionContinuous, + *system, + **kwargs) + else: + return TransferFunctionDiscrete.__new__( + TransferFunctionDiscrete, + *system, + **kwargs) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the state space LTI system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + # Remove system arguments, not needed by parents anymore + super().__init__(**kwargs) + + self._num = None + self._den = None + + self.num, self.den = normalize(*system) + + def __repr__(self): + """Return representation of the system's transfer function""" + return ( + f'{self.__class__.__name__}(\n' + f'{repr(self.num)},\n' + f'{repr(self.den)},\n' + f'dt: {repr(self.dt)}\n)' + ) + + @property + def num(self): + """Numerator of the `TransferFunction` system.""" + return self._num + + @num.setter + def num(self, num): + self._num = atleast_1d(num) + + # Update dimensions + if len(self.num.shape) > 1: + self.outputs, self.inputs = self.num.shape + else: + self.outputs = 1 + self.inputs = 1 + + @property + def den(self): + """Denominator of the `TransferFunction` system.""" + return self._den + + @den.setter + def den(self, den): + self._den = atleast_1d(den) + + def _copy(self, system): + """ + Copy the parameters of another `TransferFunction` object + + Parameters + ---------- + system : `TransferFunction` + The `StateSpace` system that is to be copied + + """ + self.num = system.num + self.den = system.den + + def to_tf(self): + """ + Return a copy of the current `TransferFunction` system. + + Returns + ------- + sys : instance of `TransferFunction` + The current system (copy) + + """ + return copy.deepcopy(self) + + def to_zpk(self): + """ + Convert system representation to `ZerosPolesGain`. + + Returns + ------- + sys : instance of `ZerosPolesGain` + Zeros, poles, gain representation of the current system + + """ + return ZerosPolesGain(*tf2zpk(self.num, self.den), + **self._dt_dict) + + def to_ss(self): + """ + Convert system representation to `StateSpace`. + + Returns + ------- + sys : instance of `StateSpace` + State space model of the current system + + """ + return StateSpace(*tf2ss(self.num, self.den), + **self._dt_dict) + + @staticmethod + def _z_to_zinv(num, den): + """Change a transfer function from the variable `z` to `z**-1`. 
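+
+        For example, ``num=[1, 2]`` and ``den=[1, 2, 3]`` (i.e.
+        ``(z + 2) / (z**2 + 2*z + 3)``) become ``[0, 1, 2]`` and ``[1, 2, 3]``,
+        i.e. ``(z**-1 + 2*z**-2) / (1 + 2*z**-1 + 3*z**-2)``.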
+ + Parameters + ---------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree of 'z'. + That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. + + Returns + ------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of ascending degree of 'z**-1'. + That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. + """ + diff = len(num) - len(den) + if diff > 0: + den = np.hstack((np.zeros(diff), den)) + elif diff < 0: + num = np.hstack((np.zeros(-diff), num)) + return num, den + + @staticmethod + def _zinv_to_z(num, den): + """Change a transfer function from the variable `z` to `z**-1`. + + Parameters + ---------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of ascending degree of 'z**-1'. + That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. + + Returns + ------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree of 'z'. + That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. + """ + diff = len(num) - len(den) + if diff > 0: + den = np.hstack((den, np.zeros(diff))) + elif diff < 0: + num = np.hstack((num, np.zeros(-diff))) + return num, den + + +class TransferFunctionContinuous(TransferFunction, lti): + r""" + Continuous-time Linear Time Invariant system in transfer function form. + + Represents the system as the transfer function + :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + Continuous-time `TransferFunction` systems inherit additional + functionality from the `lti` class. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + + See Also + -------- + ZerosPolesGain, StateSpace, lti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g. ``s^2 + 3s + 5`` would be represented as + ``[1, 3, 5]``) + + Examples + -------- + Construct the transfer function + :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`: + + >>> from scipy import signal + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + >>> signal.TransferFunction(num, den) + TransferFunctionContinuous( + array([ 1., 3., 3.]), + array([ 1., 2., 1.]), + dt: None + ) + + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `TransferFunction` system. + + Parameters: See `cont2discrete` for details. 
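+
+        For instance, ``sys.to_discrete(dt=0.1, method='bilinear')`` discretizes
+        the system with Tustin's method (the ``dt`` value here is arbitrary).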
+ + Returns + ------- + sys: instance of `dlti` and `StateSpace` + """ + return TransferFunction(*cont2discrete((self.num, self.den), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class TransferFunctionDiscrete(TransferFunction, dlti): + r""" + Discrete-time Linear Time Invariant system in transfer function form. + + Represents the system as the transfer function + :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + Discrete-time `TransferFunction` systems inherit additional functionality + from the `dlti` class. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + ZerosPolesGain, StateSpace, dlti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g., ``z^2 + 3z + 5`` would be represented as + ``[1, 3, 5]``). + + Examples + -------- + Construct the transfer function + :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of + 0.5 seconds: + + >>> from scipy import signal + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + >>> signal.TransferFunction(num, den, dt=0.5) + TransferFunctionDiscrete( + array([ 1., 3., 3.]), + array([ 1., 2., 1.]), + dt: 0.5 + ) + + """ + pass + + +class ZerosPolesGain(LinearTimeInvariant): + r""" + Linear Time Invariant system class in zeros, poles, gain form. + + Represents the system as the continuous- or discrete-time transfer function + :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is + the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. + `ZerosPolesGain` systems inherit additional functionality from the `lti`, + respectively the `dlti` classes, depending on which system representation + is used. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + + See Also + -------- + TransferFunction, StateSpace, lti, dlti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. 
It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + Examples + -------- + Construct the transfer function + :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> from scipy import signal + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + Construct the transfer function + :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time + of 0.1 seconds: + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) + ZerosPolesGainDiscrete( + array([1, 2]), + array([3, 4]), + 5, + dt: 0.1 + ) + + """ + def __new__(cls, *system, **kwargs): + """Handle object conversion if input is an instance of `lti`""" + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_zpk() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is ZerosPolesGain: + if kwargs.get('dt') is None: + return ZerosPolesGainContinuous.__new__( + ZerosPolesGainContinuous, + *system, + **kwargs) + else: + return ZerosPolesGainDiscrete.__new__( + ZerosPolesGainDiscrete, + *system, + **kwargs + ) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the zeros, poles, gain system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + super().__init__(**kwargs) + + self._zeros = None + self._poles = None + self._gain = None + + self.zeros, self.poles, self.gain = system + + def __repr__(self): + """Return representation of the `ZerosPolesGain` system.""" + return ( + f'{self.__class__.__name__}(\n' + f'{repr(self.zeros)},\n' + f'{repr(self.poles)},\n' + f'{repr(self.gain)},\n' + f'dt: {repr(self.dt)}\n)' + ) + + @property + def zeros(self): + """Zeros of the `ZerosPolesGain` system.""" + return self._zeros + + @zeros.setter + def zeros(self, zeros): + self._zeros = atleast_1d(zeros) + + # Update dimensions + if len(self.zeros.shape) > 1: + self.outputs, self.inputs = self.zeros.shape + else: + self.outputs = 1 + self.inputs = 1 + + @property + def poles(self): + """Poles of the `ZerosPolesGain` system.""" + return self._poles + + @poles.setter + def poles(self, poles): + self._poles = atleast_1d(poles) + + @property + def gain(self): + """Gain of the `ZerosPolesGain` system.""" + return self._gain + + @gain.setter + def gain(self, gain): + self._gain = gain + + def _copy(self, system): + """ + Copy the parameters of another `ZerosPolesGain` system. + + Parameters + ---------- + system : instance of `ZerosPolesGain` + The zeros, poles gain system that is to be copied + + """ + self.poles = system.poles + self.zeros = system.zeros + self.gain = system.gain + + def to_tf(self): + """ + Convert system representation to `TransferFunction`. + + Returns + ------- + sys : instance of `TransferFunction` + Transfer function of the current system + + """ + return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain), + **self._dt_dict) + + def to_zpk(self): + """ + Return a copy of the current 'ZerosPolesGain' system. + + Returns + ------- + sys : instance of `ZerosPolesGain` + The current system (copy) + + """ + return copy.deepcopy(self) + + def to_ss(self): + """ + Convert system representation to `StateSpace`. 
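+
+        The result is in controller canonical form, as produced by `zpk2ss`. For
+        example, ``signal.ZerosPolesGain([1, 2], [3, 4], 5).to_ss()`` returns a
+        two-state `StateSpace` system.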
+ + Returns + ------- + sys : instance of `StateSpace` + State space model of the current system + + """ + return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain), + **self._dt_dict) + + +class ZerosPolesGainContinuous(ZerosPolesGain, lti): + r""" + Continuous-time Linear Time Invariant system in zeros, poles, gain form. + + Represents the system as the continuous time transfer function + :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is + the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. + Continuous-time `ZerosPolesGain` systems inherit additional functionality + from the `lti` class. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + + See Also + -------- + TransferFunction, StateSpace, lti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + Examples + -------- + Construct the transfer function + :math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> from scipy import signal + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `ZerosPolesGain` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `ZerosPolesGain` + """ + return ZerosPolesGain( + *cont2discrete((self.zeros, self.poles, self.gain), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class ZerosPolesGainDiscrete(ZerosPolesGain, dlti): + r""" + Discrete-time Linear Time Invariant system in zeros, poles, gain form. + + Represents the system as the discrete-time transfer function + :math:`H(z)=k \prod_i (z - q[i]) / \prod_j (z - p[j])`, where :math:`k` is + the `gain`, :math:`q` are the `zeros` and :math:`p` are the `poles`. + Discrete-time `ZerosPolesGain` systems inherit additional functionality + from the `dlti` class. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + TransferFunction, StateSpace, dlti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. 
For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + Examples + -------- + Construct the transfer function + :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> from scipy import signal + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + Construct the transfer function + :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time + of 0.1 seconds: + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) + ZerosPolesGainDiscrete( + array([1, 2]), + array([3, 4]), + 5, + dt: 0.1 + ) + + """ + pass + + +class StateSpace(LinearTimeInvariant): + r""" + Linear Time Invariant system in state-space form. + + Represents the system as the continuous-time, first order differential + equation :math:`\dot{x} = A x + B u` or the discrete-time difference + equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems + inherit additional functionality from the `lti`, respectively the `dlti` + classes, depending on which system representation is used. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 4 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + See Also + -------- + TransferFunction, ZerosPolesGain, lti, dlti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> from scipy import signal + >>> import numpy as np + >>> a = np.array([[0, 1], [0, 0]]) + >>> b = np.array([[0], [1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> sys = signal.StateSpace(a, b, c, d) + >>> print(sys) + StateSpaceContinuous( + array([[0, 1], + [0, 0]]), + array([[0], + [1]]), + array([[1, 0]]), + array([[0]]), + dt: None + ) + + >>> sys.to_discrete(0.1) + StateSpaceDiscrete( + array([[1. , 0.1], + [0. , 1. ]]), + array([[0.005], + [0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + >>> a = np.array([[1, 0.1], [0, 1]]) + >>> b = np.array([[0.005], [0.1]]) + + >>> signal.StateSpace(a, b, c, d, dt=0.1) + StateSpaceDiscrete( + array([[1. , 0.1], + [0. , 1. 
]]), + array([[0.005], + [0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + """ + + # Override NumPy binary operations and ufuncs + __array_priority__ = 100.0 + __array_ufunc__ = None + + def __new__(cls, *system, **kwargs): + """Create new StateSpace object and settle inheritance.""" + # Handle object conversion if input is an instance of `lti` + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_ss() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is StateSpace: + if kwargs.get('dt') is None: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system, **kwargs) + else: + return StateSpaceDiscrete.__new__(StateSpaceDiscrete, + *system, **kwargs) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the state space lti/dlti system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + # Remove system arguments, not needed by parents anymore + super().__init__(**kwargs) + + self._A = None + self._B = None + self._C = None + self._D = None + + self.A, self.B, self.C, self.D = abcd_normalize(*system) + + def __repr__(self): + """Return representation of the `StateSpace` system.""" + return ( + f'{self.__class__.__name__}(\n' + f'{repr(self.A)},\n' + f'{repr(self.B)},\n' + f'{repr(self.C)},\n' + f'{repr(self.D)},\n' + f'dt: {repr(self.dt)}\n)' + ) + + def _check_binop_other(self, other): + return isinstance(other, (StateSpace, np.ndarray, float, complex, + np.number, int)) + + def __mul__(self, other): + """ + Post-multiply another system or a scalar + + Handles multiplication of systems in the sense of a frequency domain + multiplication. That means, given two systems E1(s) and E2(s), their + multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s) + is equivalent to first applying E2(s), and then E1(s). + + Notes + ----- + For SISO systems the order of system application does not matter. + However, for MIMO systems, where the two systems are matrices, the + order above ensures standard Matrix multiplication rules apply. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. 
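+ # (multiplying a continuous-time system by a discrete-time one makes this
+ # method return NotImplemented below, so Python raises TypeError for ``*``).
+ #
+ # Usage sketch (illustrative): for two compatible systems,
+ # ``cascade = sys1 * sys2`` applies ``sys2`` to the input first and
+ # ``sys1`` second, i.e. H(s) = H1(s) * H2(s), matching the series
+ # interconnection derived below.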
+ if type(other) is not type(self): + return NotImplemented + + if self.dt != other.dt: + raise TypeError('Cannot multiply systems with different `dt`.') + + n1 = self.A.shape[0] + n2 = other.A.shape[0] + + # Interconnection of systems + # x1' = A1 x1 + B1 u1 + # y1 = C1 x1 + D1 u1 + # x2' = A2 x2 + B2 y1 + # y2 = C2 x2 + D2 y1 + # + # Plugging in with u1 = y2 yields + # [x1'] [A1 B1*C2 ] [x1] [B1*D2] + # [x2'] = [0 A2 ] [x2] + [B2 ] u2 + # [x1] + # y2 = [C1 D1*C2] [x2] + D1*D2 u2 + a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))), + np.hstack((zeros((n2, n1)), other.A)))) + b = np.vstack((np.dot(self.B, other.D), other.B)) + c = np.hstack((self.C, np.dot(self.D, other.C))) + d = np.dot(self.D, other.D) + else: + # Assume that other is a scalar / matrix + # For post multiplication the input gets scaled + a = self.A + b = np.dot(self.B, other) + c = self.C + d = np.dot(self.D, other) + + common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __rmul__(self, other): + """Pre-multiply a scalar or matrix (but not StateSpace)""" + if not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + # For pre-multiplication only the output gets scaled + a = self.A + b = self.B + c = np.dot(other, self.C) + d = np.dot(other, self.D) + + common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __neg__(self): + """Negate the system (equivalent to pre-multiplying by -1).""" + return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict) + + def __add__(self, other): + """ + Adds two systems in the sense of frequency domain addition. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. 
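+ # (adding a continuous-time system to a discrete-time one raises the
+ # TypeError just below).
+ #
+ # Usage sketch (illustrative): for two compatible systems,
+ # ``parallel = sys1 + sys2`` feeds the same input to both and sums their
+ # outputs, i.e. H(s) = H1(s) + H2(s), matching the parallel
+ # interconnection derived below.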
+ if type(other) is not type(self): + raise TypeError(f'Cannot add {type(self)} and {type(other)}') + + if self.dt != other.dt: + raise TypeError('Cannot add systems with different `dt`.') + # Interconnection of systems + # x1' = A1 x1 + B1 u + # y1 = C1 x1 + D1 u + # x2' = A2 x2 + B2 u + # y2 = C2 x2 + D2 u + # y = y1 + y2 + # + # Plugging in yields + # [x1'] [A1 0 ] [x1] [B1] + # [x2'] = [0 A2] [x2] + [B2] u + # [x1] + # y = [C1 C2] [x2] + [D1 + D2] u + a = linalg.block_diag(self.A, other.A) + b = np.vstack((self.B, other.B)) + c = np.hstack((self.C, other.C)) + d = self.D + other.D + else: + other = np.atleast_2d(other) + if self.D.shape == other.shape: + # A scalar/matrix is really just a static system (A=0, B=0, C=0) + a = self.A + b = self.B + c = self.C + d = self.D + other + else: + raise ValueError("Cannot add systems with incompatible " + f"dimensions ({self.D.shape} and {other.shape})") + + common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __sub__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(-other) + + def __radd__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(other) + + def __rsub__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return (-self).__add__(other) + + def __truediv__(self, other): + """ + Divide by a scalar + """ + # Division by non-StateSpace scalars + if not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + if isinstance(other, np.ndarray) and other.ndim > 0: + # It's ambiguous what this means, so disallow it + raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays") + + return self.__mul__(1/other) + + @property + def A(self): + """State matrix of the `StateSpace` system.""" + return self._A + + @A.setter + def A(self, A): + self._A = _atleast_2d_or_none(A) + + @property + def B(self): + """Input matrix of the `StateSpace` system.""" + return self._B + + @B.setter + def B(self, B): + self._B = _atleast_2d_or_none(B) + self.inputs = self.B.shape[-1] + + @property + def C(self): + """Output matrix of the `StateSpace` system.""" + return self._C + + @C.setter + def C(self, C): + self._C = _atleast_2d_or_none(C) + self.outputs = self.C.shape[0] + + @property + def D(self): + """Feedthrough matrix of the `StateSpace` system.""" + return self._D + + @D.setter + def D(self, D): + self._D = _atleast_2d_or_none(D) + + def _copy(self, system): + """ + Copy the parameters of another `StateSpace` system. + + Parameters + ---------- + system : instance of `StateSpace` + The state-space system that is to be copied + + """ + self.A = system.A + self.B = system.B + self.C = system.C + self.D = system.D + + def to_tf(self, **kwargs): + """ + Convert system representation to `TransferFunction`. + + Parameters + ---------- + kwargs : dict, optional + Additional keywords passed to `ss2zpk` + + Returns + ------- + sys : instance of `TransferFunction` + Transfer function of the current system + + """ + return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D, + **kwargs), **self._dt_dict) + + def to_zpk(self, **kwargs): + """ + Convert system representation to `ZerosPolesGain`. 
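+
+ A minimal usage sketch (illustrative only; the conversion relies on
+ `ss2zpk` and keeps the sampling time of the current system):
+
+ >>> from scipy import signal
+ >>> sys = signal.StateSpace([[0, 1], [0, 0]], [[0], [1]], [[1, 0]], [[0]])
+ >>> sys_zpk = sys.to_zpk()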
+ + Parameters + ---------- + kwargs : dict, optional + Additional keywords passed to `ss2zpk` + + Returns + ------- + sys : instance of `ZerosPolesGain` + Zeros, poles, gain representation of the current system + + """ + return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D, + **kwargs), **self._dt_dict) + + def to_ss(self): + """ + Return a copy of the current `StateSpace` system. + + Returns + ------- + sys : instance of `StateSpace` + The current system (copy) + + """ + return copy.deepcopy(self) + + +class StateSpaceContinuous(StateSpace, lti): + r""" + Continuous-time Linear Time Invariant system in state-space form. + + Represents the system as the continuous-time, first order differential + equation :math:`\dot{x} = A x + B u`. + Continuous-time `StateSpace` systems inherit additional functionality + from the `lti` class. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 3 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + + See Also + -------- + TransferFunction, ZerosPolesGain, lti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + + >>> a = np.array([[0, 1], [0, 0]]) + >>> b = np.array([[0], [1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> sys = signal.StateSpace(a, b, c, d) + >>> print(sys) + StateSpaceContinuous( + array([[0, 1], + [0, 0]]), + array([[0], + [1]]), + array([[1, 0]]), + array([[0]]), + dt: None + ) + + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `StateSpace` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `StateSpace` + """ + return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class StateSpaceDiscrete(StateSpace, dlti): + r""" + Discrete-time Linear Time Invariant system in state-space form. + + Represents the system as the discrete-time difference equation + :math:`x[k+1] = A x[k] + B u[k]`. + `StateSpace` systems inherit additional functionality from the `dlti` + class. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 3 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + TransferFunction, ZerosPolesGain, dlti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. 
For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + + >>> a = np.array([[1, 0.1], [0, 1]]) + >>> b = np.array([[0.005], [0.1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> signal.StateSpace(a, b, c, d, dt=0.1) + StateSpaceDiscrete( + array([[ 1. , 0.1], + [ 0. , 1. ]]), + array([[ 0.005], + [ 0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + """ + pass + + +def lsim(system, U, T, X0=None, interp=True): + """ + Simulate output of a continuous-time linear system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + U : array_like + An input array describing the input at each time `T` + (interpolation is assumed between given times). If there are + multiple inputs, then each column of the rank-2 array + represents an input. If U = 0 or None, a zero input is used. + T : array_like + The time steps at which the input is defined and at which the + output is desired. Must be nonnegative, increasing, and equally spaced. + X0 : array_like, optional + The initial conditions on the state vector (zero by default). + interp : bool, optional + Whether to use linear (True, the default) or zero-order-hold (False) + interpolation for the input array. + + Returns + ------- + T : 1D ndarray + Time values for the output. + yout : 1D ndarray + System response. + xout : ndarray + Time evolution of the state vector. + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + We'll use `lsim` to simulate an analog Bessel filter applied to + a signal. + + >>> import numpy as np + >>> from scipy.signal import bessel, lsim + >>> import matplotlib.pyplot as plt + + Create a low-pass Bessel filter with a cutoff of 12 Hz. + + >>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True) + + Generate data to which the filter is applied. + + >>> t = np.linspace(0, 1.25, 500, endpoint=False) + + The input signal is the sum of three sinusoidal curves, with + frequencies 4 Hz, 40 Hz, and 80 Hz. The filter should mostly + eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal. + + >>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) + + ... 0.5*np.cos(2*np.pi*80*t)) + + Simulate the filter with `lsim`. + + >>> tout, yout, xout = lsim((b, a), U=u, T=t) + + Plot the result. + + >>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input') + >>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output') + >>> plt.legend(loc='best', shadow=True, framealpha=1) + >>> plt.grid(alpha=0.3) + >>> plt.xlabel('t') + >>> plt.show() + + In a second example, we simulate a double integrator ``y'' = u``, with + a constant input ``u = 1``. We'll use the state space representation + of the integrator. + + >>> from scipy.signal import lti + >>> A = np.array([[0.0, 1.0], [0.0, 0.0]]) + >>> B = np.array([[0.0], [1.0]]) + >>> C = np.array([[1.0, 0.0]]) + >>> D = 0.0 + >>> system = lti(A, B, C, D) + + `t` and `u` define the time and input signal for the system to + be simulated. 
+ + >>> t = np.linspace(0, 5, num=50) + >>> u = np.ones_like(t) + + Compute the simulation, and then plot `y`. As expected, the plot shows + the curve ``y = 0.5*t**2``. + + >>> tout, y, x = lsim(system, u, t) + >>> plt.plot(t, y) + >>> plt.grid(alpha=0.3) + >>> plt.xlabel('t') + >>> plt.show() + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('lsim can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + T = atleast_1d(T) + if len(T.shape) != 1: + raise ValueError("T must be a rank-1 array.") + + A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D)) + n_states = A.shape[0] + n_inputs = B.shape[1] + + n_steps = T.size + if X0 is None: + X0 = zeros(n_states, sys.A.dtype) + xout = np.empty((n_steps, n_states), sys.A.dtype) + + if T[0] == 0: + xout[0] = X0 + elif T[0] > 0: + # step forward to initial time, with zero input + xout[0] = dot(X0, linalg.expm(transpose(A) * T[0])) + else: + raise ValueError("Initial time must be nonnegative") + + no_input = (U is None or + (isinstance(U, (int, float)) and U == 0.) or + not np.any(U)) + + if n_steps == 1: + yout = squeeze(xout @ C.T) + if not no_input: + yout += squeeze(U @ D.T) + return T, yout, squeeze(xout) + + dt = T[1] - T[0] + if not np.allclose(np.diff(T), dt): + raise ValueError("Time steps are not equally spaced.") + + if no_input: + # Zero input: just use matrix exponential + # take transpose because state is a row vector + expAT_dt = linalg.expm(A.T * dt) + for i in range(1, n_steps): + xout[i] = xout[i-1] @ expAT_dt + yout = squeeze(xout @ C.T) + return T, yout, squeeze(xout) + + # Nonzero input + U = atleast_1d(U) + if U.ndim == 1: + U = U[:, np.newaxis] + + if U.shape[0] != n_steps: + raise ValueError("U must have the same number of rows " + "as elements in T.") + + if U.shape[1] != n_inputs: + raise ValueError("System does not define that many inputs.") + + if not interp: + # Zero-order hold + # Algorithm: to integrate from time 0 to time dt, we solve + # xdot = A x + B u, x(0) = x0 + # udot = 0, u(0) = u0. + # + # Solution is + # [ x(dt) ] [ A*dt B*dt ] [ x0 ] + # [ u(dt) ] = exp [ 0 0 ] [ u0 ] + M = np.vstack([np.hstack([A * dt, B * dt]), + np.zeros((n_inputs, n_states + n_inputs))]) + # transpose everything because the state and input are row vectors + expMT = linalg.expm(M.T) + Ad = expMT[:n_states, :n_states] + Bd = expMT[n_states:, :n_states] + for i in range(1, n_steps): + xout[i] = xout[i-1] @ Ad + U[i-1] @ Bd + else: + # Linear interpolation between steps + # Algorithm: to integrate from time 0 to time dt, with linear + # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve + # xdot = A x + B u, x(0) = x0 + # udot = (u1 - u0) / dt, u(0) = u0. 
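+ #
+ # Stacking x, u and (u1 - u0) into one augmented state turns this into a
+ # constant-coefficient linear ODE, so one exact step of length dt reduces
+ # to a single matrix exponential of the augmented matrix M built below
+ # (dt is already folded into its blocks).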
+ # + # Solution is + # [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ] + # [ u(dt) ] = exp [ 0 0 I ] [ u0 ] + # [u1 - u0] [ 0 0 0 ] [u1 - u0] + M = np.vstack([np.hstack([A * dt, B * dt, + np.zeros((n_states, n_inputs))]), + np.hstack([np.zeros((n_inputs, n_states + n_inputs)), + np.identity(n_inputs)]), + np.zeros((n_inputs, n_states + 2 * n_inputs))]) + expMT = linalg.expm(M.T) + Ad = expMT[:n_states, :n_states] + Bd1 = expMT[n_states+n_inputs:, :n_states] + Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1 + for i in range(1, n_steps): + xout[i] = xout[i-1] @ Ad + U[i-1] @ Bd0 + U[i] @ Bd1 + + yout = squeeze(xout @ C.T) + squeeze(U @ D.T) + return T, yout, squeeze(xout) + + +def _default_response_times(A, n): + """Compute a reasonable set of time samples for the response time. + + This function is used by `impulse` and `step` to compute the response time + when the `T` argument to the function is None. + + Parameters + ---------- + A : array_like + The system matrix, which is square. + n : int + The number of time samples to generate. + + Returns + ------- + t : ndarray + The 1-D array of length `n` of time samples at which the response + is to be computed. + """ + # Create a reasonable time interval. + # TODO: This could use some more work. + # For example, what is expected when the system is unstable? + vals = linalg.eigvals(A) + r = min(abs(real(vals))) + if r == 0.0: + r = 1.0 + tc = 1.0 / r + t = linspace(0.0, 7 * tc, n) + return t + + +def impulse(system, X0=None, T=None, N=None): + """Impulse response of continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : array_like, optional + Initial state-vector. Defaults to zero. + T : array_like, optional + Time points. Computed if not given. + N : int, optional + The number of time points to compute (if `T` is not given). + + Returns + ------- + T : ndarray + A 1-D array of time points. + yout : ndarray + A 1-D array containing the impulse response of the system (except for + singularities at zero). + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + Compute the impulse response of a second order system with a repeated + root: ``x''(t) + 2*x'(t) + x(t) = u(t)`` + + >>> from scipy import signal + >>> system = ([1.0], [1.0, 2.0, 1.0]) + >>> t, y = signal.impulse(system) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, y) + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('impulse can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + if X0 is None: + X = squeeze(sys.B) + else: + X = squeeze(sys.B + X0) + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + else: + T = asarray(T) + + _, h, _ = lsim(sys, 0., T, X, interp=False) + return T, h + + +def step(system, X0=None, T=None, N=None): + """Step response of continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. 
+ The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : array_like, optional + Initial state-vector (default is zero). + T : array_like, optional + Time points (computed if not given). + N : int, optional + Number of time points to compute if `T` is not given. + + Returns + ------- + T : 1D ndarray + Output time points. + yout : 1D ndarray + Step response of system. + + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> lti = signal.lti([1.0], [1.0, 1.0]) + >>> t, y = signal.step(lti) + >>> plt.plot(t, y) + >>> plt.xlabel('Time [s]') + >>> plt.ylabel('Amplitude') + >>> plt.title('Step response for 1. Order Lowpass') + >>> plt.grid() + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('step can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + else: + T = asarray(T) + U = ones(T.shape, sys.A.dtype) + vals = lsim(sys, U, T, X0=X0, interp=False) + return vals[0], vals[1] + + +def bode(system, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + w : array_like, optional + Array of frequencies (in rad/s). Magnitude and phase data is calculated + for every value in this array. If not given a reasonable set will be + calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Frequency array [rad/s] + mag : 1D ndarray + Magnitude array [dB] + phase : 1D ndarray + Phase array [deg] + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sys = signal.TransferFunction([1], [1, 1]) + >>> w, mag, phase = signal.bode(sys) + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + w, y = freqresp(system, w=w, n=n) + + mag = 20.0 * np.log10(abs(y)) + phase = np.unwrap(np.arctan2(y.imag, y.real)) * 180.0 / np.pi + + return w, mag, phase + + +def freqresp(system, w=None, n=10000): + r"""Calculate the frequency response of a continuous-time system. + + Parameters + ---------- + system : an instance of the `lti` class or a tuple describing the system. 
+ The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + w : array_like, optional + Array of frequencies (in rad/s). Magnitude and phase data is + calculated for every value in this array. If not given, a reasonable + set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Frequency array [rad/s] + H : 1D ndarray + Array of complex magnitude values + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + Generating the Nyquist plot of a transfer function + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Construct the transfer function :math:`H(s) = \frac{5}{(s-1)^3}`: + + >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5]) + + >>> w, H = signal.freqresp(s1) + + >>> plt.figure() + >>> plt.plot(H.real, H.imag, "b") + >>> plt.plot(H.real, -H.imag, "r") + >>> plt.show() + """ + if isinstance(system, lti): + if isinstance(system, (TransferFunction, ZerosPolesGain)): + sys = system + else: + sys = system._as_zpk() + elif isinstance(system, dlti): + raise AttributeError('freqresp can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_zpk() + + if sys.inputs != 1 or sys.outputs != 1: + raise ValueError("freqresp() requires a SISO (single input, single " + "output) system.") + + if w is not None: + worN = w + else: + worN = n + + if isinstance(sys, TransferFunction): + # In the call to freqs(), sys.num.ravel() is used because there are + # cases where sys.num is a 2-D array with a single row. + w, h = freqs(sys.num.ravel(), sys.den, worN=worN) + + elif isinstance(sys, ZerosPolesGain): + w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN) + + return w, h + + +# This class will be used by place_poles to return its results +# see https://code.activestate.com/recipes/52308/ +class Bunch: + def __init__(self, **kwds): + self.__dict__.update(kwds) + + +def _valid_inputs(A, B, poles, method, rtol, maxiter): + """ + Check the poles come in complex conjugate pairs + Check shapes of A, B and poles are compatible. 
+ Check the method chosen is compatible with provided poles + Return update method to use and ordered poles + + """ + poles = np.asarray(poles) + if poles.ndim > 1: + raise ValueError("Poles must be a 1D array like.") + # Will raise ValueError if poles do not come in complex conjugates pairs + poles = _order_complex_poles(poles) + if A.ndim > 2: + raise ValueError("A must be a 2D array/matrix.") + if B.ndim > 2: + raise ValueError("B must be a 2D array/matrix") + if A.shape[0] != A.shape[1]: + raise ValueError("A must be square") + if len(poles) > A.shape[0]: + raise ValueError("maximum number of poles is %d but you asked for %d" % + (A.shape[0], len(poles))) + if len(poles) < A.shape[0]: + raise ValueError("number of poles is %d but you should provide %d" % + (len(poles), A.shape[0])) + r = np.linalg.matrix_rank(B) + for p in poles: + if sum(p == poles) > r: + raise ValueError("at least one of the requested pole is repeated " + "more than rank(B) times") + # Choose update method + update_loop = _YT_loop + if method not in ('KNV0','YT'): + raise ValueError("The method keyword must be one of 'YT' or 'KNV0'") + + if method == "KNV0": + update_loop = _KNV0_loop + if not all(np.isreal(poles)): + raise ValueError("Complex poles are not supported by KNV0") + + if maxiter < 1: + raise ValueError("maxiter must be at least equal to 1") + + # We do not check rtol <= 0 as the user can use a negative rtol to + # force maxiter iterations + if rtol > 1: + raise ValueError("rtol can not be greater than 1") + + return update_loop, poles + + +def _order_complex_poles(poles): + """ + Check we have complex conjugates pairs and reorder P according to YT, ie + real_poles, complex_i, conjugate complex_i, .... + The lexicographic sort on the complex poles is added to help the user to + compare sets of poles. + """ + ordered_poles = np.sort(poles[np.isreal(poles)]) + im_poles = [] + for p in np.sort(poles[np.imag(poles) < 0]): + if np.conj(p) in poles: + im_poles.extend((p, np.conj(p))) + + ordered_poles = np.hstack((ordered_poles, im_poles)) + + if poles.shape[0] != len(ordered_poles): + raise ValueError("Complex poles must come with their conjugates") + return ordered_poles + + +def _KNV0(B, ker_pole, transfer_matrix, j, poles): + """ + Algorithm "KNV0" Kautsky et Al. Robust pole + assignment in linear state feedback, Int journal of Control + 1985, vol 41 p 1129->1155 + https://la.epfl.ch/files/content/sites/la/files/ + users/105941/public/KautskyNicholsDooren + + """ + # Remove xj form the base + transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1) + # If we QR this matrix in full mode Q=Q0|Q1 + # then Q1 will be a single column orthogonal to + # Q0, that's what we are looking for ! + + # After merge of gh-4249 great speed improvements could be achieved + # using QR updates instead of full QR in the line below + + # To debug with numpy qr uncomment the line below + # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete") + Q, R = s_qr(transfer_matrix_not_j, mode="full") + + mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T) + yj = np.dot(mat_ker_pj, Q[:, -1]) + + # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its + # projection into ker_pole[j] will yield a vector + # close to 0. As we are looking for a vector in ker_pole[j] + # simply stick with transfer_matrix[:, j] (unless someone provides me with + # a better choice ?) 
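+ #
+ # Put differently (descriptive note): because ker_pole[j] has orthonormal
+ # columns, mat_ker_pj above is the orthogonal projector onto
+ # span(ker_pole[j]), so yj is the projection of Q[:, -1] onto that
+ # subspace; the check below only replaces column j when this projection
+ # is non-negligible.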
+ + if not np.allclose(yj, 0): + xj = yj/np.linalg.norm(yj) + transfer_matrix[:, j] = xj + + # KNV does not support complex poles, using YT technique the two lines + # below seem to work 9 out of 10 times but it is not reliable enough: + # transfer_matrix[:, j]=real(xj) + # transfer_matrix[:, j+1]=imag(xj) + + # Add this at the beginning of this function if you wish to test + # complex support: + # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])): + # return + # Problems arise when imag(xj)=>0 I have no idea on how to fix this + + +def _YT_real(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.1 page 19 related to real pairs + """ + # step 1 page 19 + u = Q[:, -2, np.newaxis] + v = Q[:, -1, np.newaxis] + + # step 2 page 19 + m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) - + np.dot(v, u.T)), ker_pole[j]) + + # step 3 page 19 + um, sm, vm = np.linalg.svd(m) + # mu1, mu2 two first columns of U => 2 first lines of U.T + mu1, mu2 = um.T[:2, :, np.newaxis] + # VM is V.T with numpy we want the first two lines of V.T + nu1, nu2 = vm[:2, :, np.newaxis] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + transfer_matrix_j_mo_transfer_matrix_j = np.vstack(( + transfer_matrix[:, i, np.newaxis], + transfer_matrix[:, j, np.newaxis])) + + if not np.allclose(sm[0], sm[1]): + ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1) + ker_pole_i_nu1 = np.dot(ker_pole[j], nu1) + ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1)) + else: + ker_pole_ij = np.vstack(( + np.hstack((ker_pole[i], + np.zeros(ker_pole[i].shape))), + np.hstack((np.zeros(ker_pole[j].shape), + ker_pole[j])) + )) + mu_nu_matrix = np.vstack( + (np.hstack((mu1, mu2)), np.hstack((nu1, nu2))) + ) + ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix) + transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T), + transfer_matrix_j_mo_transfer_matrix_j) + if not np.allclose(transfer_matrix_ij, 0): + transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij / + np.linalg.norm(transfer_matrix_ij)) + transfer_matrix[:, i] = transfer_matrix_ij[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = transfer_matrix_ij[ + transfer_matrix[:, i].shape[0]:, 0 + ] + else: + # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to + # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to + # ker_pole_mu_nu and iterate. As we are looking for a vector in + # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help + # (that's a guess, not a claim !) 
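+ #
+ # The first column of ker_pole_mu_nu is a stacked candidate: its top half
+ # becomes column i and its bottom half column j of the transfer matrix.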
+ transfer_matrix[:, i] = ker_pole_mu_nu[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = ker_pole_mu_nu[ + transfer_matrix[:, i].shape[0]:, 0 + ] + + +def _YT_complex(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.2 page 20 related to complex pairs + """ + # step 1 page 20 + ur = np.sqrt(2)*Q[:, -2, np.newaxis] + ui = np.sqrt(2)*Q[:, -1, np.newaxis] + u = ur + 1j*ui + + # step 2 page 20 + ker_pole_ij = ker_pole[i] + m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) - + np.dot(np.conj(u), u.T)), ker_pole_ij) + + # step 3 page 20 + e_val, e_vec = np.linalg.eig(m) + # sort eigenvalues according to their module + e_val_idx = np.argsort(np.abs(e_val)) + mu1 = e_vec[:, e_val_idx[-1], np.newaxis] + mu2 = e_vec[:, e_val_idx[-2], np.newaxis] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + + # remember transfer_matrix_i has been split as + # transfer_matrix[i]=real(transfer_matrix_i) and + # transfer_matrix[j]=imag(transfer_matrix_i) + transfer_matrix_j_mo_transfer_matrix_j = ( + transfer_matrix[:, i, np.newaxis] + + 1j*transfer_matrix[:, j, np.newaxis] + ) + if not np.allclose(np.abs(e_val[e_val_idx[-1]]), + np.abs(e_val[e_val_idx[-2]])): + ker_pole_mu = np.dot(ker_pole_ij, mu1) + else: + mu1_mu2_matrix = np.hstack((mu1, mu2)) + ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix) + transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)), + transfer_matrix_j_mo_transfer_matrix_j) + + if not np.allclose(transfer_matrix_i_j, 0): + transfer_matrix_i_j = (transfer_matrix_i_j / + np.linalg.norm(transfer_matrix_i_j)) + transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0]) + transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0]) + else: + # same idea as in YT_real + transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0]) + transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0]) + + +def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): + """ + Algorithm "YT" Tits, Yang. Globally Convergent + Algorithms for Robust Pole Assignment by State Feedback + https://hdl.handle.net/1903/5598 + The poles P have to be sorted accordingly to section 6.2 page 20 + + """ + # The IEEE edition of the YT paper gives useful information on the + # optimal update order for the real poles in order to minimize the number + # of times we have to loop over all poles, see page 1442 + nb_real = poles[np.isreal(poles)].shape[0] + # hnb => Half Nb Real + hnb = nb_real // 2 + + # Stick to the indices in the paper and then remove one to get numpy array + # index it is a bit easier to link the code to the paper this way even if it + # is not very clean. 
The paper is unclear about what should be done when + # there is only one real pole => use KNV0 on this real pole seem to work + if nb_real > 0: + #update the biggest real pole with the smallest one + update_order = [[nb_real], [1]] + else: + update_order = [[],[]] + + r_comp = np.arange(nb_real+1, len(poles)+1, 2) + # step 1.a + r_p = np.arange(1, hnb+nb_real % 2) + update_order[0].extend(2*r_p) + update_order[1].extend(2*r_p+1) + # step 1.b + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 1.c + r_p = np.arange(1, hnb+1) + update_order[0].extend(2*r_p-1) + update_order[1].extend(2*r_p) + # step 1.d + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 2.a + r_j = np.arange(2, hnb+nb_real % 2) + for j in r_j: + for i in range(1, hnb+1): + update_order[0].append(i) + update_order[1].append(i+j) + # step 2.b + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 2.c + r_j = np.arange(2, hnb+nb_real % 2) + for j in r_j: + for i in range(hnb+1, nb_real+1): + idx_1 = i+j + if idx_1 > nb_real: + idx_1 = i+j-nb_real + update_order[0].append(i) + update_order[1].append(idx_1) + # step 2.d + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 3.a + for i in range(1, hnb+1): + update_order[0].append(i) + update_order[1].append(i+hnb) + # step 3.b + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + + update_order = np.array(update_order).T-1 + stop = False + nb_try = 0 + while nb_try < maxiter and not stop: + det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix)) + for i, j in update_order: + if i == j: + assert i == 0, "i!=0 for KNV call in YT" + assert np.isreal(poles[i]), "calling KNV on a complex pole" + _KNV0(B, ker_pole, transfer_matrix, i, poles) + else: + transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j), + axis=1) + # after merge of gh-4249 great speed improvements could be + # achieved using QR updates instead of full QR in the line below + + #to debug with numpy qr uncomment the line below + #Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete") + Q, _ = s_qr(transfer_matrix_not_i_j, mode="full") + + if np.isreal(poles[i]): + assert np.isreal(poles[j]), "mixing real and complex " + \ + "in YT_real" + str(poles) + _YT_real(ker_pole, Q, transfer_matrix, i, j) + else: + assert ~np.isreal(poles[i]), "mixing real and complex " + \ + "in YT_real" + str(poles) + _YT_complex(ker_pole, Q, transfer_matrix, i, j) + + det_transfer_matrix = np.max((np.sqrt(np.spacing(1)), + np.abs(np.linalg.det(transfer_matrix)))) + cur_rtol = np.abs( + (det_transfer_matrix - + det_transfer_matrixb) / + det_transfer_matrix) + if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)): + # Convergence test from YT page 21 + stop = True + nb_try += 1 + return stop, cur_rtol, nb_try + + +def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): + """ + Loop over all poles one by one and apply KNV method 0 algorithm + """ + # This method is useful only because we need to be able to call + # _KNV0 from YT without looping over all poles, otherwise it would + # have been fine to mix _KNV0_loop and 
_KNV0 in a single function + stop = False + nb_try = 0 + while nb_try < maxiter and not stop: + det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix)) + for j in range(B.shape[0]): + _KNV0(B, ker_pole, transfer_matrix, j, poles) + + det_transfer_matrix = np.max((np.sqrt(np.spacing(1)), + np.abs(np.linalg.det(transfer_matrix)))) + cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) / + det_transfer_matrix) + if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)): + # Convergence test from YT page 21 + stop = True + + nb_try += 1 + return stop, cur_rtol, nb_try + + +def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30): + """ + Compute K such that eigenvalues (A - dot(B, K))=poles. + + K is the gain matrix such as the plant described by the linear system + ``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``, + as close as possible to those asked for in poles. + + SISO, MISO and MIMO systems are supported. + + Parameters + ---------- + A, B : ndarray + State-space representation of linear system ``AX + BU``. + poles : array_like + Desired real poles and/or complex conjugates poles. + Complex poles are only supported with ``method="YT"`` (default). + method: {'YT', 'KNV0'}, optional + Which method to choose to find the gain matrix K. One of: + + - 'YT': Yang Tits + - 'KNV0': Kautsky, Nichols, Van Dooren update method 0 + + See References and Notes for details on the algorithms. + rtol: float, optional + After each iteration the determinant of the eigenvectors of + ``A - B*K`` is compared to its previous value, when the relative + error between these two values becomes lower than `rtol` the algorithm + stops. Default is 1e-3. + maxiter: int, optional + Maximum number of iterations to compute the gain matrix. + Default is 30. + + Returns + ------- + full_state_feedback : Bunch object + full_state_feedback is composed of: + gain_matrix : 1-D ndarray + The closed loop matrix K such as the eigenvalues of ``A-BK`` + are as close as possible to the requested poles. + computed_poles : 1-D ndarray + The poles corresponding to ``A-BK`` sorted as first the real + poles in increasing order, then the complex conjugates in + lexicographic order. + requested_poles : 1-D ndarray + The poles the algorithm was asked to place sorted as above, + they may differ from what was achieved. + X : 2-D ndarray + The transfer matrix such as ``X * diag(poles) = (A - B*K)*X`` + (see Notes) + rtol : float + The relative tolerance achieved on ``det(X)`` (see Notes). + `rtol` will be NaN if it is possible to solve the system + ``diag(poles) = (A - B*K)``, or 0 when the optimization + algorithms can't do anything i.e when ``B.shape[1] == 1``. + nb_iter : int + The number of iterations performed before converging. + `nb_iter` will be NaN if it is possible to solve the system + ``diag(poles) = (A - B*K)``, or 0 when the optimization + algorithms can't do anything i.e when ``B.shape[1] == 1``. + + Notes + ----- + The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et + al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer + matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses + rank-2 updates. This yields on average more robust solutions (see [2]_ + pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV + does not in its original version. Only update method 0 proposed by KNV has + been implemented here, hence the name ``'KNV0'``. 
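+
+ Equivalently (a sketch of the recovery step, not a description of the exact
+ numerics used internally): once a well-conditioned ``X`` has been found for
+ the prescribed spectrum, ``K`` follows from
+ ``A - B*K = X * diag(poles) * inv(X)``, for instance via a least-squares
+ solve for ``B*K``.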
+ + KNV extended to complex poles is used in Matlab's ``place`` function, YT is + distributed under a non-free licence by Slicot under the name ``robpole``. + It is unclear and undocumented how KNV0 has been extended to complex poles + (Tits and Yang claim on page 14 of their paper that their method can not be + used to extend KNV to complex poles), therefore only YT supports them in + this implementation. + + As the solution to the problem of pole placement is not unique for MIMO + systems, both methods start with a tentative transfer matrix which is + altered in various way to increase its determinant. Both methods have been + proven to converge to a stable solution, however depending on the way the + initial transfer matrix is chosen they will converge to different + solutions and therefore there is absolutely no guarantee that using + ``'KNV0'`` will yield results similar to Matlab's or any other + implementation of these algorithms. + + Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'`` + is only provided because it is needed by ``'YT'`` in some specific cases. + Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'`` + when ``abs(det(X))`` is used as a robustness indicator. + + [2]_ is available as a technical report on the following URL: + https://hdl.handle.net/1903/5598 + + References + ---------- + .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment + in linear state feedback", International Journal of Control, Vol. 41 + pp. 1129-1155, 1985. + .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust + pole assignment by state feedback", IEEE Transactions on Automatic + Control, Vol. 41, pp. 1432-1452, 1996. + + Examples + -------- + A simple example demonstrating real pole placement using both KNV and YT + algorithms. This is example number 1 from section 4 of the reference KNV + publication ([1]_): + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ], + ... [-0.5814, -4.290, 0, 0.6750 ], + ... [ 1.067, 4.273, -6.654, 5.893 ], + ... [ 0.0480, 4.273, 1.343, -2.104 ]]) + >>> B = np.array([[ 0, 5.679 ], + ... [ 1.136, 1.136 ], + ... [ 0, 0, ], + ... [-3.146, 0 ]]) + >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + + Now compute K with KNV method 0, with the default YT method and with the YT + method while forcing 100 iterations of the algorithm and print some results + after each call. + + >>> fsf1 = signal.place_poles(A, B, P, method='KNV0') + >>> fsf1.gain_matrix + array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785], + [ 0.50587268, 0.57779091, 0.51795763, -0.41991442]]) + + >>> fsf2 = signal.place_poles(A, B, P) # uses YT method + >>> fsf2.computed_poles + array([-8.6659, -5.0566, -0.5 , -0.2 ]) + + >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100) + >>> fsf3.X + array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j], + [-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j], + [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j], + [ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]]) + + The absolute value of the determinant of X is a good indicator to check the + robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing + it. 
Below a comparison of the robustness of the results above: + + >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X)) + True + >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X)) + True + + Now a simple example for complex poles: + + >>> A = np.array([[ 0, 7/3., 0, 0 ], + ... [ 0, 0, 0, 7/9. ], + ... [ 0, 0, 0, 0 ], + ... [ 0, 0, 0, 0 ]]) + >>> B = np.array([[ 0, 0 ], + ... [ 0, 0 ], + ... [ 1, 0 ], + ... [ 0, 1 ]]) + >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3. + >>> fsf = signal.place_poles(A, B, P, method='YT') + + We can plot the desired and computed poles in the complex plane: + + >>> t = np.linspace(0, 2*np.pi, 401) + >>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle + >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag, + ... 'wo', label='Desired') + >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx', + ... label='Placed') + >>> plt.grid() + >>> plt.axis('image') + >>> plt.axis([-1.1, 1.1, -1.1, 1.1]) + >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1) + + """ + # Move away all the inputs checking, it only adds noise to the code + update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter) + + # The current value of the relative tolerance we achieved + cur_rtol = 0 + # The number of iterations needed before converging + nb_iter = 0 + + # Step A: QR decomposition of B page 1132 KN + # to debug with numpy qr uncomment the line below + # u, z = np.linalg.qr(B, mode="complete") + u, z = s_qr(B, mode="full") + rankB = np.linalg.matrix_rank(B) + u0 = u[:, :rankB] + u1 = u[:, rankB:] + z = z[:rankB, :] + + # If we can use the identity matrix as X the solution is obvious + if B.shape[0] == rankB: + # if B is square and full rank there is only one solution + # such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0]) + # i.e K=inv(B)*(diag(P)-A) + # if B has as many lines as its rank (but not square) there are many + # solutions and we can choose one using least squares + # => use lstsq in both cases. + # In both cases the transfer matrix X will be eye(A.shape[0]) and I + # can hardly think of a better one so there is nothing to optimize + # + # for complex poles we use the following trick + # + # |a -b| has for eigenvalues a+b and a-b + # |b a| + # + # |a+bi 0| has the obvious eigenvalues a+bi and a-bi + # |0 a-bi| + # + # e.g solving the first one in R gives the solution + # for the second one in C + diag_poles = np.zeros(A.shape) + idx = 0 + while idx < poles.shape[0]: + p = poles[idx] + diag_poles[idx, idx] = np.real(p) + if ~np.isreal(p): + diag_poles[idx, idx+1] = -np.imag(p) + diag_poles[idx+1, idx+1] = np.real(p) + diag_poles[idx+1, idx] = np.imag(p) + idx += 1 # skip next one + idx += 1 + gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0] + transfer_matrix = np.eye(A.shape[0]) + cur_rtol = np.nan + nb_iter = np.nan + else: + # step A (p1144 KNV) and beginning of step F: decompose + # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors + # in the same loop + ker_pole = [] + + # flag to skip the conjugate of a complex pole + skip_conjugate = False + # select orthonormal base ker_pole for each Pole and vectors for + # transfer_matrix + for j in range(B.shape[0]): + if skip_conjugate: + skip_conjugate = False + continue + pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T + + # after QR Q=Q0|Q1 + # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix. + # Q1 is orthogonal to Q0 and will be multiplied by the zeros in + # R when using mode "complete". 
In default mode Q1 and the zeros + # in R are not computed + + # To debug with numpy qr uncomment the line below + # Q, _ = np.linalg.qr(pole_space_j, mode="complete") + Q, _ = s_qr(pole_space_j, mode="full") + + ker_pole_j = Q[:, pole_space_j.shape[1]:] + + # We want to select one vector in ker_pole_j to build the transfer + # matrix, however qr returns sometimes vectors with zeros on the + # same line for each pole and this yields very long convergence + # times. + # Or some other times a set of vectors, one with zero imaginary + # part and one (or several) with imaginary parts. After trying + # many ways to select the best possible one (eg ditch vectors + # with zero imaginary part for complex poles) I ended up summing + # all vectors in ker_pole_j, this solves 100% of the problems and + # is a valid choice for transfer_matrix. + # This way for complex poles we are sure to have a non zero + # imaginary part that way, and the problem of lines full of zeros + # in transfer_matrix is solved too as when a vector from + # ker_pole_j has a zero the other one(s) when + # ker_pole_j.shape[1]>1) for sure won't have a zero there. + + transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis] + transfer_matrix_j = (transfer_matrix_j / + np.linalg.norm(transfer_matrix_j)) + if ~np.isreal(poles[j]): # complex pole + transfer_matrix_j = np.hstack([np.real(transfer_matrix_j), + np.imag(transfer_matrix_j)]) + ker_pole.extend([ker_pole_j, ker_pole_j]) + + # Skip next pole as it is the conjugate + skip_conjugate = True + else: # real pole, nothing to do + ker_pole.append(ker_pole_j) + + if j == 0: + transfer_matrix = transfer_matrix_j + else: + transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j)) + + if rankB > 1: # otherwise there is nothing we can optimize + stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix, + poles, B, maxiter, rtol) + if not stop and rtol > 0: + # if rtol<=0 the user has probably done that on purpose, + # don't annoy them + err_msg = ( + "Convergence was not reached after maxiter iterations.\n" + f"You asked for a tolerance of {rtol}, we got {cur_rtol}." + ) + warnings.warn(err_msg, stacklevel=2) + + # reconstruct transfer_matrix to match complex conjugate pairs, + # ie transfer_matrix_j/transfer_matrix_j+1 are + # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after + transfer_matrix = transfer_matrix.astype(complex) + idx = 0 + while idx < poles.shape[0]-1: + if ~np.isreal(poles[idx]): + rel = transfer_matrix[:, idx].copy() + img = transfer_matrix[:, idx+1] + # rel will be an array referencing a column of transfer_matrix + # if we don't copy() it will changer after the next line and + # and the line after will not yield the correct value + transfer_matrix[:, idx] = rel-1j*img + transfer_matrix[:, idx+1] = rel+1j*img + idx += 1 # skip next one + idx += 1 + + try: + m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles), + transfer_matrix.T)).T + gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A)) + except np.linalg.LinAlgError as e: + raise ValueError("The poles you've chosen can't be placed. 
" + "Check the controllability matrix and try " + "another set of poles") from e + + # Beware: Kautsky solves A+BK but the usual form is A-BK + gain_matrix = -gain_matrix + # K still contains complex with ~=0j imaginary parts, get rid of them + gain_matrix = np.real(gain_matrix) + + full_state_feedback = Bunch() + full_state_feedback.gain_matrix = gain_matrix + full_state_feedback.computed_poles = _order_complex_poles( + np.linalg.eig(A - np.dot(B, gain_matrix))[0] + ) + full_state_feedback.requested_poles = poles + full_state_feedback.X = transfer_matrix + full_state_feedback.rtol = cur_rtol + full_state_feedback.nb_iter = nb_iter + + return full_state_feedback + + +def dlsim(system, u, t=None, x0=None): + """ + Simulate output of a discrete-time linear system. + + Parameters + ---------- + system : tuple of array_like or instance of `dlti` + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + u : array_like + An input array describing the input at each time `t` (interpolation is + assumed between given times). If there are multiple inputs, then each + column of the rank-2 array represents an input. + t : array_like, optional + The time steps at which the input is defined. If `t` is given, it + must be the same length as `u`, and the final value in `t` determines + the number of steps returned in the output. + x0 : array_like, optional + The initial conditions on the state vector (zero by default). + + Returns + ------- + tout : ndarray + Time values for the output, as a 1-D array. + yout : ndarray + System response, as a 1-D array. + xout : ndarray, optional + Time-evolution of the state-vector. Only generated if the input is a + `StateSpace` system. 
+ + See Also + -------- + lsim, dstep, dimpulse, cont2discrete + + Examples + -------- + A simple integrator transfer function with a discrete time step of 1.0 + could be implemented as: + + >>> import numpy as np + >>> from scipy import signal + >>> tf = ([1.0,], [1.0, -1.0], 1.0) + >>> t_in = [0.0, 1.0, 2.0, 3.0] + >>> u = np.asarray([0.0, 0.0, 1.0, 1.0]) + >>> t_out, y = signal.dlsim(tf, u, t=t_in) + >>> y.T + array([[ 0., 0., 0., 1.]]) + + """ + # Convert system to dlti-StateSpace + if isinstance(system, lti): + raise AttributeError('dlsim can only be used with discrete-time dlti ' + 'systems.') + elif not isinstance(system, dlti): + system = dlti(*system[:-1], dt=system[-1]) + + # Condition needed to ensure output remains compatible + is_ss_input = isinstance(system, StateSpace) + system = system._as_ss() + + u = np.atleast_1d(u) + + if u.ndim == 1: + u = np.atleast_2d(u).T + + if t is None: + out_samples = len(u) + stoptime = (out_samples - 1) * system.dt + else: + stoptime = t[-1] + out_samples = int(np.floor(stoptime / system.dt)) + 1 + + # Pre-build output arrays + xout = np.zeros((out_samples, system.A.shape[0])) + yout = np.zeros((out_samples, system.C.shape[0])) + tout = np.linspace(0.0, stoptime, num=out_samples) + + # Check initial condition + if x0 is None: + xout[0, :] = np.zeros((system.A.shape[1],)) + else: + xout[0, :] = np.asarray(x0) + + # Pre-interpolate inputs into the desired time steps + if t is None: + u_dt = u + else: + if len(u.shape) == 1: + u = u[:, np.newaxis] + + u_dt = make_interp_spline(t, u, k=1)(tout) + + # Simulate the system + for i in range(0, out_samples - 1): + xout[i+1, :] = (np.dot(system.A, xout[i, :]) + + np.dot(system.B, u_dt[i, :])) + yout[i, :] = (np.dot(system.C, xout[i, :]) + + np.dot(system.D, u_dt[i, :])) + + # Last point + yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) + + np.dot(system.D, u_dt[out_samples-1, :])) + + if is_ss_input: + return tout, yout, xout + else: + return tout, yout + + +def dimpulse(system, x0=None, t=None, n=None): + """ + Impulse response of discrete-time system. + + Parameters + ---------- + system : tuple of array_like or instance of `dlti` + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + x0 : array_like, optional + Initial state-vector. Defaults to zero. + t : array_like, optional + Time points. Computed if not given. + n : int, optional + The number of time points to compute (if `t` is not given). + + Returns + ------- + tout : ndarray + Time values for the output, as a 1-D array. + yout : tuple of ndarray + Impulse response of system. Each element of the tuple represents + the output of the system based on an impulse in each input. 
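The impulse response here is obtained by feeding a discrete unit sample on each input channel to `dlsim`, which is what the loop in the implementation below does. A small cross-check for a SISO system, reusing the Butterworth system from the Examples section (illustrative):

import numpy as np
from scipy import signal

dsys = signal.dlti(*signal.butter(3, 0.5))   # discrete SISO system
n = 25
t, (y_imp,) = signal.dimpulse(dsys, n=n)

u = np.zeros(n)
u[0] = 1.0                                   # discrete unit impulse
_, y_sim = signal.dlsim(dsys, u)

assert np.allclose(y_imp, y_sim)             # same response either way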
+ + See Also + -------- + impulse, dstep, dlsim, cont2discrete + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> butter = signal.dlti(*signal.butter(3, 0.5)) + >>> t, y = signal.dimpulse(butter, n=25) + >>> plt.step(t, np.squeeze(y)) + >>> plt.grid() + >>> plt.xlabel('n [samples]') + >>> plt.ylabel('Amplitude') + + """ + # Convert system to dlti-StateSpace + if isinstance(system, dlti): + system = system._as_ss() + elif isinstance(system, lti): + raise AttributeError('dimpulse can only be used with discrete-time ' + 'dlti systems.') + else: + system = dlti(*system[:-1], dt=system[-1])._as_ss() + + # Default to 100 samples if unspecified + if n is None: + n = 100 + + # If time is not specified, use the number of samples + # and system dt + if t is None: + t = np.linspace(0, n * system.dt, n, endpoint=False) + else: + t = np.asarray(t) + + # For each input, implement a step change + yout = None + for i in range(0, system.inputs): + u = np.zeros((t.shape[0], system.inputs)) + u[0, i] = 1.0 + + one_output = dlsim(system, u, t=t, x0=x0) + + if yout is None: + yout = (one_output[1],) + else: + yout = yout + (one_output[1],) + + tout = one_output[0] + + return tout, yout + + +def dstep(system, x0=None, t=None, n=None): + """ + Step response of discrete-time system. + + Parameters + ---------- + system : tuple of array_like + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + x0 : array_like, optional + Initial state-vector. Defaults to zero. + t : array_like, optional + Time points. Computed if not given. + n : int, optional + The number of time points to compute (if `t` is not given). + + Returns + ------- + tout : ndarray + Output time points, as a 1-D array. + yout : tuple of ndarray + Step response of system. Each element of the tuple represents + the output of the system based on a step response to each input. + + See Also + -------- + step, dimpulse, dlsim, cont2discrete + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> butter = signal.dlti(*signal.butter(3, 0.5)) + >>> t, y = signal.dstep(butter, n=25) + >>> plt.step(t, np.squeeze(y)) + >>> plt.grid() + >>> plt.xlabel('n [samples]') + >>> plt.ylabel('Amplitude') + """ + # Convert system to dlti-StateSpace + if isinstance(system, dlti): + system = system._as_ss() + elif isinstance(system, lti): + raise AttributeError('dstep can only be used with discrete-time dlti ' + 'systems.') + else: + system = dlti(*system[:-1], dt=system[-1])._as_ss() + + # Default to 100 samples if unspecified + if n is None: + n = 100 + + # If time is not specified, use the number of samples + # and system dt + if t is None: + t = np.linspace(0, n * system.dt, n, endpoint=False) + else: + t = np.asarray(t) + + # For each input, implement a step change + yout = None + for i in range(0, system.inputs): + u = np.zeros((t.shape[0], system.inputs)) + u[:, i] = np.ones((t.shape[0],)) + + one_output = dlsim(system, u, t=t, x0=x0) + + if yout is None: + yout = (one_output[1],) + else: + yout = yout + (one_output[1],) + + tout = one_output[0] + + return tout, yout + + +def dfreqresp(system, w=None, n=10000, whole=False): + r""" + Calculate the frequency response of a discrete-time system. 
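Relating the two discrete responses above: with zero initial state, the step response returned by `dstep` is the running sum of the impulse response returned by `dimpulse`. A quick check with the same Butterworth system (illustrative):

import numpy as np
from scipy import signal

butter = signal.dlti(*signal.butter(3, 0.5))
_, (y_imp,) = signal.dimpulse(butter, n=25)
_, (y_step,) = signal.dstep(butter, n=25)

# Step response = cumulative sum of the impulse response.
assert np.allclose(np.cumsum(y_imp, axis=0), y_step)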
+ + Parameters + ---------- + system : an instance of the `dlti` class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `dlti`) + * 2 (numerator, denominator, dt) + * 3 (zeros, poles, gain, dt) + * 4 (A, B, C, D, dt) + + w : array_like, optional + Array of frequencies (in radians/sample). Magnitude and phase data is + calculated for every value in this array. If not given a reasonable + set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + whole : bool, optional + Normally, if 'w' is not given, frequencies are computed from 0 to the + Nyquist frequency, pi radians/sample (upper-half of unit-circle). If + `whole` is True, compute frequencies from 0 to 2*pi radians/sample. + + Returns + ------- + w : 1D ndarray + Frequency array [radians/sample] + H : 1D ndarray + Array of complex magnitude values + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). + + .. versionadded:: 0.18.0 + + Examples + -------- + Generating the Nyquist plot of a transfer function + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Construct the transfer function + :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with a sampling time of 0.05 + seconds: + + >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05) + + >>> w, H = signal.dfreqresp(sys) + + >>> plt.figure() + >>> plt.plot(H.real, H.imag, "b") + >>> plt.plot(H.real, -H.imag, "r") + >>> plt.show() + + """ + if not isinstance(system, dlti): + if isinstance(system, lti): + raise AttributeError('dfreqresp can only be used with ' + 'discrete-time systems.') + + system = dlti(*system[:-1], dt=system[-1]) + + if isinstance(system, StateSpace): + # No SS->ZPK code exists right now, just SS->TF->ZPK + system = system._as_tf() + + if not isinstance(system, (TransferFunction, ZerosPolesGain)): + raise ValueError('Unknown system type') + + if system.inputs != 1 or system.outputs != 1: + raise ValueError("dfreqresp requires a SISO (single input, single " + "output) system.") + + if w is not None: + worN = w + else: + worN = n + + if isinstance(system, TransferFunction): + # Convert numerator and denominator from polynomials in the variable + # 'z' to polynomials in the variable 'z^-1', as freqz expects. + num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den) + w, h = freqz(num, den, worN=worN, whole=whole) + + elif isinstance(system, ZerosPolesGain): + w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN, + whole=whole) + + return w, h + + +def dbode(system, w=None, n=100): + r""" + Calculate Bode magnitude and phase data of a discrete-time system. + + Parameters + ---------- + system : + An instance of the LTI class `dlti` or a tuple describing the system. + The number of elements in the tuple determine the interpretation, i.e.: + + 1. ``(sys_dlti)``: Instance of LTI class `dlti`. Note that derived instances, + such as instances of `TransferFunction`, `ZerosPolesGain`, or `StateSpace`, + are allowed as well. + 2. ``(num, den, dt)``: Rational polynomial as described in `TransferFunction`. 
+ The coefficients of the polynomials should be specified in descending + exponent order, e.g., z² + 3z + 5 would be represented as ``[1, 3, 5]``. + 3. ``(zeros, poles, gain, dt)``: Zeros, poles, gain form as described + in `ZerosPolesGain`. + 4. ``(A, B, C, D, dt)``: State-space form as described in `StateSpace`. + + w : array_like, optional + Array of frequencies normalized to the Nyquist frequency being π, i.e., + having unit radiant / sample. Magnitude and phase data is calculated for every + value in this array. If not given, a reasonable set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Array of frequencies normalized to the Nyquist frequency being ``np.pi/dt`` + with ``dt`` being the sampling interval of the `system` parameter. + The unit is rad/s assuming ``dt`` is in seconds. + mag : 1D ndarray + Magnitude array in dB + phase : 1D ndarray + Phase array in degrees + + Notes + ----- + This function is a convenience wrapper around `dfreqresp` for extracting + magnitude and phase from the calculated complex-valued amplitude of the + frequency response. + + .. versionadded:: 0.18.0 + + See Also + -------- + dfreqresp, dlti, TransferFunction, ZerosPolesGain, StateSpace + + + Examples + -------- + The following example shows how to create a Bode plot of a 5-th order + Butterworth lowpass filter with a corner frequency of 100 Hz: + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy import signal + ... + >>> T = 1e-4 # sampling interval in s + >>> f_c, o = 1e2, 5 # corner frequency in Hz (i.e., -3 dB value) and filter order + >>> bb, aa = signal.butter(o, f_c, 'lowpass', fs=1/T) + ... + >>> w, mag, phase = signal.dbode((bb, aa, T)) + >>> w /= 2*np.pi # convert unit of frequency into Hertz + ... + >>> fg, (ax0, ax1) = plt.subplots(2, 1, sharex='all', figsize=(5, 4), + ... tight_layout=True) + >>> ax0.set_title("Bode Plot of Butterworth Lowpass Filter " + + ... rf"($f_c={f_c:g}\,$Hz, order={o})") + >>> ax0.set_ylabel(r"Magnitude in dB") + >>> ax1.set(ylabel=r"Phase in Degrees", + ... xlabel="Frequency $f$ in Hertz", xlim=(w[1], w[-1])) + >>> ax0.semilogx(w, mag, 'C0-', label=r"$20\,\log_{10}|G(f)|$") # Magnitude plot + >>> ax1.semilogx(w, phase, 'C1-', label=r"$\angle G(f)$") # Phase plot + ... + >>> for ax_ in (ax0, ax1): + ... ax_.axvline(f_c, color='m', alpha=0.25, label=rf"${f_c=:g}\,$Hz") + ... ax_.grid(which='both', axis='x') # plot major & minor vertical grid lines + ... ax_.grid(which='major', axis='y') + ... 
ax_.legend() + >>> plt.show() + """ + w, y = dfreqresp(system, w=w, n=n) + + if isinstance(system, dlti): + dt = system.dt + else: + dt = system[-1] + + mag = 20.0 * np.log10(abs(y)) + phase = np.rad2deg(np.unwrap(np.angle(y))) + + return w / dt, mag, phase diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py new file mode 100644 index 0000000000000000000000000000000000000000..4d64beaca86d1da50b563668679b6fc52c954ab0 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py @@ -0,0 +1,139 @@ +# Author: Eric Larson +# 2014 + +"""Tools for MLS generation""" + +import numpy as np + +from ._max_len_seq_inner import _max_len_seq_inner + +__all__ = ['max_len_seq'] + + +# These are definitions of linear shift register taps for use in max_len_seq() +_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1], + 9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8], + 14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14], + 18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21], + 23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20], + 27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7], + 31: [28], 32: [31, 30, 10]} + +def max_len_seq(nbits, state=None, length=None, taps=None): + """ + Maximum length sequence (MLS) generator. + + Parameters + ---------- + nbits : int + Number of bits to use. Length of the resulting sequence will + be ``(2**nbits) - 1``. Note that generating long sequences + (e.g., greater than ``nbits == 16``) can take a long time. + state : array_like, optional + If array, must be of length ``nbits``, and will be cast to binary + (bool) representation. If None, a seed of ones will be used, + producing a repeatable representation. If ``state`` is all + zeros, an error is raised as this is invalid. Default: None. + length : int, optional + Number of samples to compute. If None, the entire length + ``(2**nbits) - 1`` is computed. + taps : array_like, optional + Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence). + If None, taps will be automatically selected (for up to + ``nbits == 32``). + + Returns + ------- + seq : array + Resulting MLS sequence of 0's and 1's. + state : array + The final state of the shift register. + + Notes + ----- + The algorithm for MLS generation is generically described in: + + https://en.wikipedia.org/wiki/Maximum_length_sequence + + The default values for taps are specifically taken from the first + option listed for each value of ``nbits`` in: + + https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm + + .. 
versionadded:: 0.15.0 + + Examples + -------- + MLS uses binary convention: + + >>> from scipy.signal import max_len_seq + >>> max_len_seq(4)[0] + array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8) + + MLS has a white spectrum (except for DC): + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from numpy.fft import fft, ifft, fftshift, fftfreq + >>> seq = max_len_seq(6)[0]*2-1 # +1 and -1 + >>> spec = fft(seq) + >>> N = len(seq) + >>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-') + >>> plt.margins(0.1, 0.1) + >>> plt.grid(True) + >>> plt.show() + + Circular autocorrelation of MLS is an impulse: + + >>> acorrcirc = ifft(spec * np.conj(spec)).real + >>> plt.figure() + >>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-') + >>> plt.margins(0.1, 0.1) + >>> plt.grid(True) + >>> plt.show() + + Linear autocorrelation of MLS is approximately an impulse: + + >>> acorr = np.correlate(seq, seq, 'full') + >>> plt.figure() + >>> plt.plot(np.arange(-N+1, N), acorr, '.-') + >>> plt.margins(0.1, 0.1) + >>> plt.grid(True) + >>> plt.show() + + """ + taps_dtype = np.int32 if np.intp().itemsize == 4 else np.int64 + if taps is None: + if nbits not in _mls_taps: + known_taps = np.array(list(_mls_taps.keys())) + raise ValueError(f'nbits must be between {known_taps.min()} and ' + f'{known_taps.max()} if taps is None') + taps = np.array(_mls_taps[nbits], taps_dtype) + else: + taps = np.unique(np.array(taps, taps_dtype))[::-1] + if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1: + raise ValueError('taps must be non-empty with values between ' + 'zero and nbits (inclusive)') + taps = np.array(taps) # needed for Cython and Pythran + n_max = (2**nbits) - 1 + if length is None: + length = n_max + else: + length = int(length) + if length < 0: + raise ValueError('length must be greater than or equal to 0') + # We use int8 instead of bool here because NumPy arrays of bools + # don't seem to work nicely with Cython + if state is None: + state = np.ones(nbits, dtype=np.int8, order='c') + else: + # makes a copy if need be, ensuring it's 0's and 1's + state = np.array(state, dtype=bool, order='c').astype(np.int8) + if state.ndim != 1 or state.size != nbits: + raise ValueError('state must be a 1-D array of size nbits') + if np.all(state == 0): + raise ValueError('state must not be all zeros') + + seq = np.empty(length, dtype=np.int8, order='c') + state = _max_len_seq_inner(taps, state, nbits, length, seq) + return seq, state diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4eb7c7b2a3b658919f14450c20cad1e612a327d7 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_peak_finding.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_peak_finding.py new file mode 100644 index 0000000000000000000000000000000000000000..ccbeca5b7a4839bbc28e9c1cfd1ebd1d028a82cf --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_peak_finding.py @@ -0,0 +1,1310 @@ +""" +Functions for identifying peaks in signals. 
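Because `max_len_seq` above returns the final shift-register `state`, the sequence can be generated in chunks; assuming, as the returned ``state`` suggests, that it can be passed back unchanged to resume generation, the concatenated chunks reproduce the single full-period call. A short sketch of that pattern:

import numpy as np
from scipy.signal import max_len_seq

nbits = 5
full, _ = max_len_seq(nbits)                       # whole period: 2**5 - 1 = 31 samples
head, state = max_len_seq(nbits, length=10)        # first 10 samples + register state
tail, _ = max_len_seq(nbits, state=state, length=full.size - 10)

# Resuming from the returned state continues the same sequence.
assert np.array_equal(np.concatenate([head, tail]), full)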
+""" +import math +import numpy as np + +from scipy.signal._wavelets import _cwt, _ricker +from scipy.stats import scoreatpercentile + +from ._peak_finding_utils import ( + _local_maxima_1d, + _select_by_peak_distance, + _peak_prominences, + _peak_widths +) + + +__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences', + 'peak_widths', 'find_peaks', 'find_peaks_cwt'] + + +def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'): + """ + Calculate the relative extrema of `data`. + + Relative extrema are calculated by finding locations where + ``comparator(data[n], data[n+1:n+order+1])`` is True. + + Parameters + ---------- + data : ndarray + Array in which to find the relative extrema. + comparator : callable + Function to use to compare two data points. + Should take two arrays as arguments. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n,n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. 'wrap' (wrap around) or + 'clip' (treat overflow as the same as the last (or first) element). + Default 'clip'. See numpy.take. + + Returns + ------- + extrema : ndarray + Boolean array of the same shape as `data` that is True at an extrema, + False otherwise. + + See also + -------- + argrelmax, argrelmin + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._peak_finding import _boolrelextrema + >>> testdata = np.array([1,2,3,2,1]) + >>> _boolrelextrema(testdata, np.greater, axis=0) + array([False, False, True, False, False], dtype=bool) + + """ + if (int(order) != order) or (order < 1): + raise ValueError('Order must be an int >= 1') + + datalen = data.shape[axis] + locs = np.arange(0, datalen) + + results = np.ones(data.shape, dtype=bool) + main = data.take(locs, axis=axis, mode=mode) + for shift in range(1, order + 1): + plus = data.take(locs + shift, axis=axis, mode=mode) + minus = data.take(locs - shift, axis=axis, mode=mode) + results &= comparator(main, plus) + results &= comparator(main, minus) + if ~results.any(): + return results + return results + + +def argrelmin(data, axis=0, order=1, mode='clip'): + """ + Calculate the relative minima of `data`. + + Parameters + ---------- + data : ndarray + Array in which to find the relative minima. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n, n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. + Available options are 'wrap' (wrap around) or 'clip' (treat overflow + as the same as the last (or first) element). + Default 'clip'. See numpy.take. + + Returns + ------- + extrema : tuple of ndarrays + Indices of the minima in arrays of integers. ``extrema[k]`` is + the array of indices of axis `k` of `data`. Note that the + return value is a tuple even when `data` is 1-D. + + See Also + -------- + argrelextrema, argrelmax, find_peaks + + Notes + ----- + This function uses `argrelextrema` with np.less as comparator. Therefore, it + requires a strict inequality on both sides of a value to consider it a + minimum. This means flat minima (more than one sample wide) are not detected. + In case of 1-D `data` `find_peaks` can be used to detect all + local minima, including flat ones, by calling it with negated `data`. + + .. 
versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import argrelmin + >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) + >>> argrelmin(x) + (array([1, 5]),) + >>> y = np.array([[1, 2, 1, 2], + ... [2, 2, 0, 0], + ... [5, 3, 4, 4]]) + ... + >>> argrelmin(y, axis=1) + (array([0, 2]), array([2, 1])) + + """ + return argrelextrema(data, np.less, axis, order, mode) + + +def argrelmax(data, axis=0, order=1, mode='clip'): + """ + Calculate the relative maxima of `data`. + + Parameters + ---------- + data : ndarray + Array in which to find the relative maxima. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n, n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. + Available options are 'wrap' (wrap around) or 'clip' (treat overflow + as the same as the last (or first) element). + Default 'clip'. See `numpy.take`. + + Returns + ------- + extrema : tuple of ndarrays + Indices of the maxima in arrays of integers. ``extrema[k]`` is + the array of indices of axis `k` of `data`. Note that the + return value is a tuple even when `data` is 1-D. + + See Also + -------- + argrelextrema, argrelmin, find_peaks + + Notes + ----- + This function uses `argrelextrema` with np.greater as comparator. Therefore, + it requires a strict inequality on both sides of a value to consider it a + maximum. This means flat maxima (more than one sample wide) are not detected. + In case of 1-D `data` `find_peaks` can be used to detect all + local maxima, including flat ones. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import argrelmax + >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) + >>> argrelmax(x) + (array([3, 6]),) + >>> y = np.array([[1, 2, 1, 2], + ... [2, 2, 0, 0], + ... [5, 3, 4, 4]]) + ... + >>> argrelmax(y, axis=1) + (array([0]), array([1])) + """ + return argrelextrema(data, np.greater, axis, order, mode) + + +def argrelextrema(data, comparator, axis=0, order=1, mode='clip'): + """ + Calculate the relative extrema of `data`. + + Parameters + ---------- + data : ndarray + Array in which to find the relative extrema. + comparator : callable + Function to use to compare two data points. + Should take two arrays as arguments. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n, n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. 'wrap' (wrap around) or + 'clip' (treat overflow as the same as the last (or first) element). + Default is 'clip'. See `numpy.take`. + + Returns + ------- + extrema : tuple of ndarrays + Indices of the maxima in arrays of integers. ``extrema[k]`` is + the array of indices of axis `k` of `data`. Note that the + return value is a tuple even when `data` is 1-D. + + See Also + -------- + argrelmin, argrelmax + + Notes + ----- + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import argrelextrema + >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) + >>> argrelextrema(x, np.greater) + (array([3, 6]),) + >>> y = np.array([[1, 2, 1, 2], + ... [2, 2, 0, 0], + ... [5, 3, 4, 4]]) + ... 
+ >>> argrelextrema(y, np.less, axis=1) + (array([0, 2]), array([2, 1])) + + """ + results = _boolrelextrema(data, comparator, + axis, order, mode) + return np.nonzero(results) + + +def _arg_x_as_expected(value): + """Ensure argument `x` is a 1-D C-contiguous array of dtype('float64'). + + Used in `find_peaks`, `peak_prominences` and `peak_widths` to make `x` + compatible with the signature of the wrapped Cython functions. + + Returns + ------- + value : ndarray + A 1-D C-contiguous array with dtype('float64'). + """ + value = np.asarray(value, order='C', dtype=np.float64) + if value.ndim != 1: + raise ValueError('`x` must be a 1-D array') + return value + + +def _arg_peaks_as_expected(value): + """Ensure argument `peaks` is a 1-D C-contiguous array of dtype('intp'). + + Used in `peak_prominences` and `peak_widths` to make `peaks` compatible + with the signature of the wrapped Cython functions. + + Returns + ------- + value : ndarray + A 1-D C-contiguous array with dtype('intp'). + """ + value = np.asarray(value) + if value.size == 0: + # Empty arrays default to np.float64 but are valid input + value = np.array([], dtype=np.intp) + try: + # Safely convert to C-contiguous array of type np.intp + value = value.astype(np.intp, order='C', casting='safe', + subok=False, copy=False) + except TypeError as e: + raise TypeError("cannot safely cast `peaks` to dtype('intp')") from e + if value.ndim != 1: + raise ValueError('`peaks` must be a 1-D array') + return value + + +def _arg_wlen_as_expected(value): + """Ensure argument `wlen` is of type `np.intp` and larger than 1. + + Used in `peak_prominences` and `peak_widths`. + + Returns + ------- + value : np.intp + The original `value` rounded up to an integer or -1 if `value` was + None. + """ + if value is None: + # _peak_prominences expects an intp; -1 signals that no value was + # supplied by the user + value = -1 + elif 1 < value: + # Round up to a positive integer + if isinstance(value, float): + value = math.ceil(value) + value = np.intp(value) + else: + raise ValueError(f'`wlen` must be larger than 1, was {value}') + return value + + +def peak_prominences(x, peaks, wlen=None): + """ + Calculate the prominence of each peak in a signal. + + The prominence of a peak measures how much a peak stands out from the + surrounding baseline of the signal and is defined as the vertical distance + between the peak and its lowest contour line. + + Parameters + ---------- + x : sequence + A signal with peaks. + peaks : sequence + Indices of peaks in `x`. + wlen : int, optional + A window length in samples that optionally limits the evaluated area for + each peak to a subset of `x`. The peak is always placed in the middle of + the window therefore the given length is rounded up to the next odd + integer. This parameter can speed up the calculation (see Notes). + + Returns + ------- + prominences : ndarray + The calculated prominences for each peak in `peaks`. + left_bases, right_bases : ndarray + The peaks' bases as indices in `x` to the left and right of each peak. + The higher base of each pair is a peak's lowest contour line. + + Raises + ------ + ValueError + If a value in `peaks` is an invalid index for `x`. + + Warns + ----- + PeakPropertyWarning + For indices in `peaks` that don't point to valid local maxima in `x`, + the returned prominence will be 0 and this warning is raised. This + also happens if `wlen` is smaller than the plateau size of a peak. + + Warnings + -------- + This function may return unexpected results for data containing NaNs. 
To + avoid this, NaNs should either be removed or replaced. + + See Also + -------- + find_peaks + Find peaks inside a signal based on peak properties. + peak_widths + Calculate the width of peaks. + + Notes + ----- + Strategy to compute a peak's prominence: + + 1. Extend a horizontal line from the current peak to the left and right + until the line either reaches the window border (see `wlen`) or + intersects the signal again at the slope of a higher peak. An + intersection with a peak of the same height is ignored. + 2. On each side find the minimal signal value within the interval defined + above. These points are the peak's bases. + 3. The higher one of the two bases marks the peak's lowest contour line. The + prominence can then be calculated as the vertical difference between the + peaks height itself and its lowest contour line. + + Searching for the peak's bases can be slow for large `x` with periodic + behavior because large chunks or even the full signal need to be evaluated + for the first algorithmic step. This evaluation area can be limited with the + parameter `wlen` which restricts the algorithm to a window around the + current peak and can shorten the calculation time if the window length is + short in relation to `x`. + However, this may stop the algorithm from finding the true global contour + line if the peak's true bases are outside this window. Instead, a higher + contour line is found within the restricted window leading to a smaller + calculated prominence. In practice, this is only relevant for the highest set + of peaks in `x`. This behavior may even be used intentionally to calculate + "local" prominences. + + .. versionadded:: 1.1.0 + + References + ---------- + .. [1] Wikipedia Article for Topographic Prominence: + https://en.wikipedia.org/wiki/Topographic_prominence + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import find_peaks, peak_prominences + >>> import matplotlib.pyplot as plt + + Create a test signal with two overlaid harmonics + + >>> x = np.linspace(0, 6 * np.pi, 1000) + >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x) + + Find all peaks and calculate prominences + + >>> peaks, _ = find_peaks(x) + >>> prominences = peak_prominences(x, peaks)[0] + >>> prominences + array([1.24159486, 0.47840168, 0.28470524, 3.10716793, 0.284603 , + 0.47822491, 2.48340261, 0.47822491]) + + Calculate the height of each peak's contour line and plot the results + + >>> contour_heights = x[peaks] - prominences + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.vlines(x=peaks, ymin=contour_heights, ymax=x[peaks]) + >>> plt.show() + + Let's evaluate a second example that demonstrates several edge cases for + one peak at index 5. + + >>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0]) + >>> peaks = np.array([5]) + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.show() + >>> peak_prominences(x, peaks) # -> (prominences, left_bases, right_bases) + (array([3.]), array([2]), array([6])) + + Note how the peak at index 3 of the same height is not considered as a + border while searching for the left base. Instead, two minima at 0 and 2 + are found in which case the one closer to the evaluated peak is always + chosen. On the right side, however, the base must be placed at 6 because the + higher peak represents the right border to the evaluated area. 
+ + >>> peak_prominences(x, peaks, wlen=3.1) + (array([2.]), array([4]), array([6])) + + Here, we restricted the algorithm to a window from 3 to 7 (the length is 5 + samples because `wlen` was rounded up to the next odd integer). Thus, the + only two candidates in the evaluated area are the two neighboring samples + and a smaller prominence is calculated. + """ + x = _arg_x_as_expected(x) + peaks = _arg_peaks_as_expected(peaks) + wlen = _arg_wlen_as_expected(wlen) + return _peak_prominences(x, peaks, wlen) + + +def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None): + """ + Calculate the width of each peak in a signal. + + This function calculates the width of a peak in samples at a relative + distance to the peak's height and prominence. + + Parameters + ---------- + x : sequence + A signal with peaks. + peaks : sequence + Indices of peaks in `x`. + rel_height : float, optional + Chooses the relative height at which the peak width is measured as a + percentage of its prominence. 1.0 calculates the width of the peak at + its lowest contour line while 0.5 evaluates at half the prominence + height. Must be at least 0. See notes for further explanation. + prominence_data : tuple, optional + A tuple of three arrays matching the output of `peak_prominences` when + called with the same arguments `x` and `peaks`. This data are calculated + internally if not provided. + wlen : int, optional + A window length in samples passed to `peak_prominences` as an optional + argument for internal calculation of `prominence_data`. This argument + is ignored if `prominence_data` is given. + + Returns + ------- + widths : ndarray + The widths for each peak in samples. + width_heights : ndarray + The height of the contour lines at which the `widths` where evaluated. + left_ips, right_ips : ndarray + Interpolated positions of left and right intersection points of a + horizontal line at the respective evaluation height. + + Raises + ------ + ValueError + If `prominence_data` is supplied but doesn't satisfy the condition + ``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak, + has the wrong dtype, is not C-contiguous or does not have the same + shape. + + Warns + ----- + PeakPropertyWarning + Raised if any calculated width is 0. This may stem from the supplied + `prominence_data` or if `rel_height` is set to 0. + + Warnings + -------- + This function may return unexpected results for data containing NaNs. To + avoid this, NaNs should either be removed or replaced. + + See Also + -------- + find_peaks + Find peaks inside a signal based on peak properties. + peak_prominences + Calculate the prominence of peaks. + + Notes + ----- + The basic algorithm to calculate a peak's width is as follows: + + * Calculate the evaluation height :math:`h_{eval}` with the formula + :math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is the + height of the peak itself, :math:`P` is the peak's prominence and + :math:`R` a positive ratio specified with the argument `rel_height`. + * Draw a horizontal line at the evaluation height to both sides, starting at + the peak's current vertical position until the lines either intersect a + slope, the signal border or cross the vertical position of the peak's + base (see `peak_prominences` for an definition). For the first case, + intersection with the signal, the true intersection point is estimated + with linear interpolation. + * Calculate the width as the horizontal distance between the chosen + endpoints on both sides. 
As a consequence of this the maximal possible + width for each peak is the horizontal distance between its bases. + + As shown above to calculate a peak's width its prominence and bases must be + known. You can supply these yourself with the argument `prominence_data`. + Otherwise, they are internally calculated (see `peak_prominences`). + + .. versionadded:: 1.1.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import chirp, find_peaks, peak_widths + >>> import matplotlib.pyplot as plt + + Create a test signal with two overlaid harmonics + + >>> x = np.linspace(0, 6 * np.pi, 1000) + >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x) + + Find all peaks and calculate their widths at the relative height of 0.5 + (contour line at half the prominence height) and 1 (at the lowest contour + line at full prominence height). + + >>> peaks, _ = find_peaks(x) + >>> results_half = peak_widths(x, peaks, rel_height=0.5) + >>> results_half[0] # widths + array([ 64.25172825, 41.29465463, 35.46943289, 104.71586081, + 35.46729324, 41.30429622, 181.93835853, 45.37078546]) + >>> results_full = peak_widths(x, peaks, rel_height=1) + >>> results_full[0] # widths + array([181.9396084 , 72.99284945, 61.28657872, 373.84622694, + 61.78404617, 72.48822812, 253.09161876, 79.36860878]) + + Plot signal, peaks and contour lines at which the widths where calculated + + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.hlines(*results_half[1:], color="C2") + >>> plt.hlines(*results_full[1:], color="C3") + >>> plt.show() + """ + x = _arg_x_as_expected(x) + peaks = _arg_peaks_as_expected(peaks) + if prominence_data is None: + # Calculate prominence if not supplied and use wlen if supplied. + wlen = _arg_wlen_as_expected(wlen) + prominence_data = _peak_prominences(x, peaks, wlen) + return _peak_widths(x, peaks, rel_height, *prominence_data) + + +def _unpack_condition_args(interval, x, peaks): + """ + Parse condition arguments for `find_peaks`. + + Parameters + ---------- + interval : number or ndarray or sequence + Either a number or ndarray or a 2-element sequence of the former. The + first value is always interpreted as `imin` and the second, if supplied, + as `imax`. + x : ndarray + The signal with `peaks`. + peaks : ndarray + An array with indices used to reduce `imin` and / or `imax` if those are + arrays. + + Returns + ------- + imin, imax : number or ndarray or None + Minimal and maximal value in `argument`. + + Raises + ------ + ValueError : + If interval border is given as array and its size does not match the size + of `x`. + + Notes + ----- + + .. versionadded:: 1.1.0 + """ + try: + imin, imax = interval + except (TypeError, ValueError): + imin, imax = (interval, None) + + # Reduce arrays if arrays + if isinstance(imin, np.ndarray): + if imin.size != x.size: + raise ValueError('array size of lower interval border must match x') + imin = imin[peaks] + if isinstance(imax, np.ndarray): + if imax.size != x.size: + raise ValueError('array size of upper interval border must match x') + imax = imax[peaks] + + return imin, imax + + +def _select_by_property(peak_properties, pmin, pmax): + """ + Evaluate where the generic property of peaks confirms to an interval. + + Parameters + ---------- + peak_properties : ndarray + An array with properties for each peak. + pmin : None or number or ndarray + Lower interval boundary for `peak_properties`. ``None`` is interpreted as + an open border. + pmax : None or number or ndarray + Upper interval boundary for `peak_properties`. 
``None`` is interpreted as + an open border. + + Returns + ------- + keep : bool + A boolean mask evaluating to true where `peak_properties` confirms to the + interval. + + See Also + -------- + find_peaks + + Notes + ----- + + .. versionadded:: 1.1.0 + """ + keep = np.ones(peak_properties.size, dtype=bool) + if pmin is not None: + keep &= (pmin <= peak_properties) + if pmax is not None: + keep &= (peak_properties <= pmax) + return keep + + +def _select_by_peak_threshold(x, peaks, tmin, tmax): + """ + Evaluate which peaks fulfill the threshold condition. + + Parameters + ---------- + x : ndarray + A 1-D array which is indexable by `peaks`. + peaks : ndarray + Indices of peaks in `x`. + tmin, tmax : scalar or ndarray or None + Minimal and / or maximal required thresholds. If supplied as ndarrays + their size must match `peaks`. ``None`` is interpreted as an open + border. + + Returns + ------- + keep : bool + A boolean mask evaluating to true where `peaks` fulfill the threshold + condition. + left_thresholds, right_thresholds : ndarray + Array matching `peak` containing the thresholds of each peak on + both sides. + + Notes + ----- + + .. versionadded:: 1.1.0 + """ + # Stack thresholds on both sides to make min / max operations easier: + # tmin is compared with the smaller, and tmax with the greater threshold to + # each peak's side + stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1], + x[peaks] - x[peaks + 1]]) + keep = np.ones(peaks.size, dtype=bool) + if tmin is not None: + min_thresholds = np.min(stacked_thresholds, axis=0) + keep &= (tmin <= min_thresholds) + if tmax is not None: + max_thresholds = np.max(stacked_thresholds, axis=0) + keep &= (max_thresholds <= tmax) + + return keep, stacked_thresholds[0], stacked_thresholds[1] + + +def find_peaks(x, height=None, threshold=None, distance=None, + prominence=None, width=None, wlen=None, rel_height=0.5, + plateau_size=None): + """ + Find peaks inside a signal based on peak properties. + + This function takes a 1-D array and finds all local maxima by + simple comparison of neighboring values. Optionally, a subset of these + peaks can be selected by specifying conditions for a peak's properties. + + Parameters + ---------- + x : sequence + A signal with peaks. + height : number or ndarray or sequence, optional + Required height of peaks. Either a number, ``None``, an array matching + `x` or a 2-element sequence of the former. The first element is + always interpreted as the minimal and the second, if supplied, as the + maximal required height. + threshold : number or ndarray or sequence, optional + Required threshold of peaks, the vertical distance to its neighboring + samples. Either a number, ``None``, an array matching `x` or a + 2-element sequence of the former. The first element is always + interpreted as the minimal and the second, if supplied, as the maximal + required threshold. + distance : number, optional + Required minimal horizontal distance (>= 1) in samples between + neighbouring peaks. Smaller peaks are removed first until the condition + is fulfilled for all remaining peaks. + prominence : number or ndarray or sequence, optional + Required prominence of peaks. Either a number, ``None``, an array + matching `x` or a 2-element sequence of the former. The first + element is always interpreted as the minimal and the second, if + supplied, as the maximal required prominence. + width : number or ndarray or sequence, optional + Required width of peaks in samples. 
Either a number, ``None``, an array + matching `x` or a 2-element sequence of the former. The first + element is always interpreted as the minimal and the second, if + supplied, as the maximal required width. + wlen : int, optional + Used for calculation of the peaks prominences, thus it is only used if + one of the arguments `prominence` or `width` is given. See argument + `wlen` in `peak_prominences` for a full description of its effects. + rel_height : float, optional + Used for calculation of the peaks width, thus it is only used if `width` + is given. See argument `rel_height` in `peak_widths` for a full + description of its effects. + plateau_size : number or ndarray or sequence, optional + Required size of the flat top of peaks in samples. Either a number, + ``None``, an array matching `x` or a 2-element sequence of the former. + The first element is always interpreted as the minimal and the second, + if supplied as the maximal required plateau size. + + .. versionadded:: 1.2.0 + + Returns + ------- + peaks : ndarray + Indices of peaks in `x` that satisfy all given conditions. + properties : dict + A dictionary containing properties of the returned peaks which were + calculated as intermediate results during evaluation of the specified + conditions: + + * 'peak_heights' + If `height` is given, the height of each peak in `x`. + * 'left_thresholds', 'right_thresholds' + If `threshold` is given, these keys contain a peaks vertical + distance to its neighbouring samples. + * 'prominences', 'right_bases', 'left_bases' + If `prominence` is given, these keys are accessible. See + `peak_prominences` for a description of their content. + * 'widths', 'width_heights', 'left_ips', 'right_ips' + If `width` is given, these keys are accessible. See `peak_widths` + for a description of their content. + * 'plateau_sizes', left_edges', 'right_edges' + If `plateau_size` is given, these keys are accessible and contain + the indices of a peak's edges (edges are still part of the + plateau) and the calculated plateau sizes. + + .. versionadded:: 1.2.0 + + To calculate and return properties without excluding peaks, provide the + open interval ``(None, None)`` as a value to the appropriate argument + (excluding `distance`). + + Warns + ----- + PeakPropertyWarning + Raised if a peak's properties have unexpected values (see + `peak_prominences` and `peak_widths`). + + Warnings + -------- + This function may return unexpected results for data containing NaNs. To + avoid this, NaNs should either be removed or replaced. + + See Also + -------- + find_peaks_cwt + Find peaks using the wavelet transformation. + peak_prominences + Directly calculate the prominence of peaks. + peak_widths + Directly calculate the width of peaks. + + Notes + ----- + In the context of this function, a peak or local maximum is defined as any + sample whose two direct neighbours have a smaller amplitude. For flat peaks + (more than one sample of equal amplitude wide) the index of the middle + sample is returned (rounded down in case the number of samples is even). + For noisy signals the peak locations can be off because the noise might + change the position of local maxima. In those cases consider smoothing the + signal before searching for peaks or use other peak finding and fitting + methods (like `find_peaks_cwt`). 
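To make the flat-peak convention above concrete: for a plateau of even width the reported index is the lower of the two middle samples, and the plateau edges are available through the `plateau_size` condition. A minimal check (illustrative):

import numpy as np
from scipy.signal import find_peaks

x = np.array([0., 1., 3., 3., 1., 0.])        # plateau of width 2 at indices 2-3
peaks, props = find_peaks(x, plateau_size=1)  # also requests the plateau properties

print(peaks)                                  # [2] -> middle sample, rounded down
print(props["plateau_sizes"])                 # [2]
print(props["left_edges"], props["right_edges"])   # [2] [3]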
+ + Some additional comments on specifying conditions: + + * Almost all conditions (excluding `distance`) can be given as half-open or + closed intervals, e.g., ``1`` or ``(1, None)`` defines the half-open + interval :math:`[1, \\infty]` while ``(None, 1)`` defines the interval + :math:`[-\\infty, 1]`. The open interval ``(None, None)`` can be specified + as well, which returns the matching properties without exclusion of peaks. + * The border is always included in the interval used to select valid peaks. + * For several conditions the interval borders can be specified with + arrays matching `x` in shape which enables dynamic constrains based on + the sample position. + * The conditions are evaluated in the following order: `plateau_size`, + `height`, `threshold`, `distance`, `prominence`, `width`. In most cases + this order is the fastest one because faster operations are applied first + to reduce the number of peaks that need to be evaluated later. + * While indices in `peaks` are guaranteed to be at least `distance` samples + apart, edges of flat peaks may be closer than the allowed `distance`. + * Use `wlen` to reduce the time it takes to evaluate the conditions for + `prominence` or `width` if `x` is large or has many local maxima + (see `peak_prominences`). + + .. versionadded:: 1.1.0 + + Examples + -------- + To demonstrate this function's usage we use a signal `x` supplied with + SciPy (see `scipy.datasets.electrocardiogram`). Let's find all peaks (local + maxima) in `x` whose amplitude lies above 0. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.datasets import electrocardiogram + >>> from scipy.signal import find_peaks + >>> x = electrocardiogram()[2000:4000] + >>> peaks, _ = find_peaks(x, height=0) + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.plot(np.zeros_like(x), "--", color="gray") + >>> plt.show() + + We can select peaks below 0 with ``height=(None, 0)`` or use arrays matching + `x` in size to reflect a changing condition for different parts of the + signal. + + >>> border = np.sin(np.linspace(0, 3 * np.pi, x.size)) + >>> peaks, _ = find_peaks(x, height=(-border, border)) + >>> plt.plot(x) + >>> plt.plot(-border, "--", color="gray") + >>> plt.plot(border, ":", color="gray") + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.show() + + Another useful condition for periodic signals can be given with the + `distance` argument. In this case, we can easily select the positions of + QRS complexes within the electrocardiogram (ECG) by demanding a distance of + at least 150 samples. + + >>> peaks, _ = find_peaks(x, distance=150) + >>> np.diff(peaks) + array([186, 180, 177, 171, 177, 169, 167, 164, 158, 162, 172]) + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.show() + + Especially for noisy signals peaks can be easily grouped by their + prominence (see `peak_prominences`). E.g., we can select all peaks except + for the mentioned QRS complexes by limiting the allowed prominence to 0.6. + + >>> peaks, properties = find_peaks(x, prominence=(None, 0.6)) + >>> properties["prominences"].max() + 0.5049999999999999 + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.show() + + And, finally, let's examine a different section of the ECG which contains + beat forms of different shape. To select only the atypical heart beats, we + combine two conditions: a minimal prominence of 1 and width of at least 20 + samples. 
+ + >>> x = electrocardiogram()[17000:18000] + >>> peaks, properties = find_peaks(x, prominence=1, width=20) + >>> properties["prominences"], properties["widths"] + (array([1.495, 2.3 ]), array([36.93773946, 39.32723577])) + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"], + ... ymax = x[peaks], color = "C1") + >>> plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"], + ... xmax=properties["right_ips"], color = "C1") + >>> plt.show() + """ + # _argmaxima1d expects array of dtype 'float64' + x = _arg_x_as_expected(x) + if distance is not None and distance < 1: + raise ValueError('`distance` must be greater or equal to 1') + + peaks, left_edges, right_edges = _local_maxima_1d(x) + properties = {} + + if plateau_size is not None: + # Evaluate plateau size + plateau_sizes = right_edges - left_edges + 1 + pmin, pmax = _unpack_condition_args(plateau_size, x, peaks) + keep = _select_by_property(plateau_sizes, pmin, pmax) + peaks = peaks[keep] + properties["plateau_sizes"] = plateau_sizes + properties["left_edges"] = left_edges + properties["right_edges"] = right_edges + properties = {key: array[keep] for key, array in properties.items()} + + if height is not None: + # Evaluate height condition + peak_heights = x[peaks] + hmin, hmax = _unpack_condition_args(height, x, peaks) + keep = _select_by_property(peak_heights, hmin, hmax) + peaks = peaks[keep] + properties["peak_heights"] = peak_heights + properties = {key: array[keep] for key, array in properties.items()} + + if threshold is not None: + # Evaluate threshold condition + tmin, tmax = _unpack_condition_args(threshold, x, peaks) + keep, left_thresholds, right_thresholds = _select_by_peak_threshold( + x, peaks, tmin, tmax) + peaks = peaks[keep] + properties["left_thresholds"] = left_thresholds + properties["right_thresholds"] = right_thresholds + properties = {key: array[keep] for key, array in properties.items()} + + if distance is not None: + # Evaluate distance condition + keep = _select_by_peak_distance(peaks, x[peaks], distance) + peaks = peaks[keep] + properties = {key: array[keep] for key, array in properties.items()} + + if prominence is not None or width is not None: + # Calculate prominence (required for both conditions) + wlen = _arg_wlen_as_expected(wlen) + properties.update(zip( + ['prominences', 'left_bases', 'right_bases'], + _peak_prominences(x, peaks, wlen=wlen) + )) + + if prominence is not None: + # Evaluate prominence condition + pmin, pmax = _unpack_condition_args(prominence, x, peaks) + keep = _select_by_property(properties['prominences'], pmin, pmax) + peaks = peaks[keep] + properties = {key: array[keep] for key, array in properties.items()} + + if width is not None: + # Calculate widths + properties.update(zip( + ['widths', 'width_heights', 'left_ips', 'right_ips'], + _peak_widths(x, peaks, rel_height, properties['prominences'], + properties['left_bases'], properties['right_bases']) + )) + # Evaluate width condition + wmin, wmax = _unpack_condition_args(width, x, peaks) + keep = _select_by_property(properties['widths'], wmin, wmax) + peaks = peaks[keep] + properties = {key: array[keep] for key, array in properties.items()} + + return peaks, properties + + +def _identify_ridge_lines(matr, max_distances, gap_thresh): + """ + Identify ridges in the 2-D matrix. + + Expect that the width of the wavelet feature increases with increasing row + number. 
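As the filtering cascade in `find_peaks` above shows, each condition is evaluated in turn and contributes its own intermediate arrays, so the returned `properties` dict only carries keys for conditions that were actually supplied. A short illustration:

import numpy as np
from scipy.signal import find_peaks

x = np.sin(np.linspace(0, 6 * np.pi, 200))

_, props = find_peaks(x, height=0)                      # only `height` given
print(sorted(props))       # ['peak_heights']

_, props = find_peaks(x, height=0, prominence=(None, None))
print(sorted(props))       # ['left_bases', 'peak_heights', 'prominences', 'right_bases']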
+ + Parameters + ---------- + matr : 2-D ndarray + Matrix in which to identify ridge lines. + max_distances : 1-D sequence + At each row, a ridge line is only connected + if the relative max at row[n] is within + `max_distances`[n] from the relative max at row[n+1]. + gap_thresh : int + If a relative maximum is not found within `max_distances`, + there will be a gap. A ridge line is discontinued if + there are more than `gap_thresh` points without connecting + a new relative maximum. + + Returns + ------- + ridge_lines : tuple + Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the + ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none + found. Each ridge-line will be sorted by row (increasing), but the + order of the ridge lines is not specified. + + References + ---------- + .. [1] Bioinformatics (2006) 22 (17): 2059-2065. + :doi:`10.1093/bioinformatics/btl355` + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal._peak_finding import _identify_ridge_lines + >>> rng = np.random.default_rng() + >>> data = rng.random((5,5)) + >>> max_dist = 3 + >>> max_distances = np.full(20, max_dist) + >>> ridge_lines = _identify_ridge_lines(data, max_distances, 1) + + Notes + ----- + This function is intended to be used in conjunction with `cwt` + as part of `find_peaks_cwt`. + + """ + if len(max_distances) < matr.shape[0]: + raise ValueError('Max_distances must have at least as many rows ' + 'as matr') + + all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1) + # Highest row for which there are any relative maxima + has_relmax = np.nonzero(all_max_cols.any(axis=1))[0] + if len(has_relmax) == 0: + return [] + start_row = has_relmax[-1] + # Each ridge line is a 3-tuple: + # rows, cols,Gap number + ridge_lines = [[[start_row], + [col], + 0] for col in np.nonzero(all_max_cols[start_row])[0]] + final_lines = [] + rows = np.arange(start_row - 1, -1, -1) + cols = np.arange(0, matr.shape[1]) + for row in rows: + this_max_cols = cols[all_max_cols[row]] + + # Increment gap number of each line, + # set it to zero later if appropriate + for line in ridge_lines: + line[2] += 1 + + # XXX These should always be all_max_cols[row] + # But the order might be different. Might be an efficiency gain + # to make sure the order is the same and avoid this iteration + prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines]) + # Look through every relative maximum found at current row + # Attempt to connect them with existing ridge lines. + for ind, col in enumerate(this_max_cols): + # If there is a previous ridge line within + # the max_distance to connect to, do so. + # Otherwise start a new one. + line = None + if len(prev_ridge_cols) > 0: + diffs = np.abs(col - prev_ridge_cols) + closest = np.argmin(diffs) + if diffs[closest] <= max_distances[row]: + line = ridge_lines[closest] + if line is not None: + # Found a point close enough, extend current ridge line + line[1].append(col) + line[0].append(row) + line[2] = 0 + else: + new_line = [[row], + [col], + 0] + ridge_lines.append(new_line) + + # Remove the ridge lines with gap_number too high + # XXX Modifying a list while iterating over it. + # Should be safe, since we iterate backwards, but + # still tacky. 
+ for ind in range(len(ridge_lines) - 1, -1, -1): + line = ridge_lines[ind] + if line[2] > gap_thresh: + final_lines.append(line) + del ridge_lines[ind] + + out_lines = [] + for line in (final_lines + ridge_lines): + sortargs = np.array(np.argsort(line[0])) + rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs) + rows[sortargs] = line[0] + cols[sortargs] = line[1] + out_lines.append([rows, cols]) + + return out_lines + + +def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None, + min_snr=1, noise_perc=10): + """ + Filter ridge lines according to prescribed criteria. Intended + to be used for finding relative maxima. + + Parameters + ---------- + cwt : 2-D ndarray + Continuous wavelet transform from which the `ridge_lines` were defined. + ridge_lines : 1-D sequence + Each element should contain 2 sequences, the rows and columns + of the ridge line (respectively). + window_size : int, optional + Size of window to use to calculate noise floor. + Default is ``cwt.shape[1] / 20``. + min_length : int, optional + Minimum length a ridge line needs to be acceptable. + Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths. + min_snr : float, optional + Minimum SNR ratio. Default 1. The signal is the value of + the cwt matrix at the shortest length scale (``cwt[0, loc]``), the + noise is the `noise_perc`\\ th percentile of datapoints contained within a + window of `window_size` around ``cwt[0, loc]``. + noise_perc : float, optional + When calculating the noise floor, percentile of data points + examined below which to consider noise. Calculated using + scipy.stats.scoreatpercentile. + + References + ---------- + .. [1] Bioinformatics (2006) 22 (17): 2059-2065. + :doi:`10.1093/bioinformatics/btl355` + + """ + num_points = cwt.shape[1] + if min_length is None: + min_length = np.ceil(cwt.shape[0] / 4) + if window_size is None: + window_size = np.ceil(num_points / 20) + + window_size = int(window_size) + hf_window, odd = divmod(window_size, 2) + + # Filter based on SNR + row_one = cwt[0, :] + noises = np.empty_like(row_one) + for ind, val in enumerate(row_one): + window_start = max(ind - hf_window, 0) + window_end = min(ind + hf_window + odd, num_points) + noises[ind] = scoreatpercentile(row_one[window_start:window_end], + per=noise_perc) + + def filt_func(line): + if len(line[0]) < min_length: + return False + snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]]) + if snr < min_snr: + return False + return True + + return list(filter(filt_func, ridge_lines)) + + +def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None, + gap_thresh=None, min_length=None, + min_snr=1, noise_perc=10, window_size=None): + """ + Find peaks in a 1-D array with wavelet transformation. + + The general approach is to smooth `vector` by convolving it with + `wavelet(width)` for each width in `widths`. Relative maxima which + appear at enough length scales, and with sufficiently high SNR, are + accepted. + + Parameters + ---------- + vector : ndarray + 1-D array in which to find the peaks. + widths : float or sequence + Single width or 1-D array-like of widths to use for calculating + the CWT matrix. In general, + this range should cover the expected width of peaks of interest. + wavelet : callable, optional + Should take two parameters and return a 1-D array to convolve + with `vector`. The first parameter determines the number of points + of the returned wavelet array, the second parameter is the scale + (`width`) of the wavelet. Should be normalized and symmetric. 
+ Default is the ricker wavelet. + max_distances : ndarray, optional + At each row, a ridge line is only connected if the relative max at + row[n] is within ``max_distances[n]`` from the relative max at + ``row[n+1]``. Default value is ``widths/4``. + gap_thresh : float, optional + If a relative maximum is not found within `max_distances`, + there will be a gap. A ridge line is discontinued if there are more + than `gap_thresh` points without connecting a new relative maximum. + Default is the first value of the widths array i.e. widths[0]. + min_length : int, optional + Minimum length a ridge line needs to be acceptable. + Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths. + min_snr : float, optional + Minimum SNR ratio. Default 1. The signal is the maximum CWT coefficient + on the largest ridge line. The noise is `noise_perc` th percentile of + datapoints contained within the same ridge line. + noise_perc : float, optional + When calculating the noise floor, percentile of data points + examined below which to consider noise. Calculated using + `stats.scoreatpercentile`. Default is 10. + window_size : int, optional + Size of window to use to calculate noise floor. + Default is ``cwt.shape[1] / 20``. + + Returns + ------- + peaks_indices : ndarray + Indices of the locations in the `vector` where peaks were found. + The list is sorted. + + See Also + -------- + find_peaks + Find peaks inside a signal based on peak properties. + + Notes + ----- + This approach was designed for finding sharp peaks among noisy data, + however with proper parameter selection it should function well for + different peak shapes. + + The algorithm is as follows: + 1. Perform a continuous wavelet transform on `vector`, for the supplied + `widths`. This is a convolution of `vector` with `wavelet(width)` for + each width in `widths`. See `cwt`. + 2. Identify "ridge lines" in the cwt matrix. These are relative maxima + at each row, connected across adjacent rows. See identify_ridge_lines + 3. Filter the ridge_lines using filter_ridge_lines. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] Bioinformatics (2006) 22 (17): 2059-2065. 
+ :doi:`10.1093/bioinformatics/btl355` + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> xs = np.arange(0, np.pi, 0.05) + >>> data = np.sin(xs) + >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10)) + >>> peakind, xs[peakind], data[peakind] + ([32], array([ 1.6]), array([ 0.9995736])) + + """ + widths = np.atleast_1d(np.asarray(widths)) + + if gap_thresh is None: + gap_thresh = np.ceil(widths[0]) + if max_distances is None: + max_distances = widths / 4.0 + if wavelet is None: + wavelet = _ricker + + cwt_dat = _cwt(vector, wavelet, widths) + ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh) + filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length, + window_size=window_size, min_snr=min_snr, + noise_perc=noise_perc) + max_locs = np.asarray([x[1][0] for x in filtered]) + max_locs.sort() + + return max_locs diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py new file mode 100644 index 0000000000000000000000000000000000000000..addcbe6951f8df093461c47848f8027dfdd406f2 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py @@ -0,0 +1,357 @@ +import numpy as np +from scipy.linalg import lstsq +from scipy._lib._util import float_factorial +from scipy.ndimage import convolve1d # type: ignore[attr-defined] +from ._arraytools import axis_slice + + +def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None, + use="conv"): + """Compute the coefficients for a 1-D Savitzky-Golay FIR filter. + + Parameters + ---------- + window_length : int + The length of the filter window (i.e., the number of coefficients). + polyorder : int + The order of the polynomial used to fit the samples. + `polyorder` must be less than `window_length`. + deriv : int, optional + The order of the derivative to compute. This must be a + nonnegative integer. The default is 0, which means to filter + the data without differentiating. + delta : float, optional + The spacing of the samples to which the filter will be applied. + This is only used if deriv > 0. + pos : int or None, optional + If pos is not None, it specifies evaluation position within the + window. The default is the middle of the window. + use : str, optional + Either 'conv' or 'dot'. This argument chooses the order of the + coefficients. The default is 'conv', which means that the + coefficients are ordered to be used in a convolution. With + use='dot', the order is reversed, so the filter is applied by + dotting the coefficients with the data set. + + Returns + ------- + coeffs : 1-D ndarray + The filter coefficients. + + See Also + -------- + savgol_filter + + Notes + ----- + .. versionadded:: 0.14.0 + + References + ---------- + A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by + Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8), + pp 1627-1639. + Jianwen Luo, Kui Ying, and Jing Bai. 2005. Savitzky-Golay smoothing and + differentiation filter for even number data. Signal Process. + 85, 7 (July 2005), 1429-1434. 
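+
+ Two handy identities of the returned coefficients, shown here only as an
+ informal check: the smoothing weights (``deriv=0``) sum to one, so a constant
+ signal is reproduced exactly, while derivative weights (``deriv >= 1``) sum
+ to zero.
+
+ >>> import numpy as np
+ >>> from scipy.signal import savgol_coeffs
+ >>> np.allclose(np.sum(savgol_coeffs(7, 3)), 1.0)
+ True
+ >>> np.allclose(np.sum(savgol_coeffs(7, 3, deriv=1)), 0.0)
+ True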
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import savgol_coeffs + >>> savgol_coeffs(5, 2) + array([-0.08571429, 0.34285714, 0.48571429, 0.34285714, -0.08571429]) + >>> savgol_coeffs(5, 2, deriv=1) + array([ 2.00000000e-01, 1.00000000e-01, 2.07548111e-16, -1.00000000e-01, + -2.00000000e-01]) + + Note that use='dot' simply reverses the coefficients. + + >>> savgol_coeffs(5, 2, pos=3) + array([ 0.25714286, 0.37142857, 0.34285714, 0.17142857, -0.14285714]) + >>> savgol_coeffs(5, 2, pos=3, use='dot') + array([-0.14285714, 0.17142857, 0.34285714, 0.37142857, 0.25714286]) + >>> savgol_coeffs(4, 2, pos=3, deriv=1, use='dot') + array([0.45, -0.85, -0.65, 1.05]) + + `x` contains data from the parabola x = t**2, sampled at + t = -1, 0, 1, 2, 3. `c` holds the coefficients that will compute the + derivative at the last position. When dotted with `x` the result should + be 6. + + >>> x = np.array([1, 0, 1, 4, 9]) + >>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot') + >>> c.dot(x) + 6.0 + """ + + # An alternative method for finding the coefficients when deriv=0 is + # t = np.arange(window_length) + # unit = (t == pos).astype(int) + # coeffs = np.polyval(np.polyfit(t, unit, polyorder), t) + # The method implemented here is faster. + + # To recreate the table of sample coefficients shown in the chapter on + # the Savitzy-Golay filter in the Numerical Recipes book, use + # window_length = nL + nR + 1 + # pos = nL + 1 + # c = savgol_coeffs(window_length, M, pos=pos, use='dot') + + if polyorder >= window_length: + raise ValueError("polyorder must be less than window_length.") + + halflen, rem = divmod(window_length, 2) + + if pos is None: + if rem == 0: + pos = halflen - 0.5 + else: + pos = halflen + + if not (0 <= pos < window_length): + raise ValueError("pos must be nonnegative and less than " + "window_length.") + + if use not in ['conv', 'dot']: + raise ValueError("`use` must be 'conv' or 'dot'") + + if deriv > polyorder: + coeffs = np.zeros(window_length) + return coeffs + + # Form the design matrix A. The columns of A are powers of the integers + # from -pos to window_length - pos - 1. The powers (i.e., rows) range + # from 0 to polyorder. (That is, A is a vandermonde matrix, but not + # necessarily square.) + x = np.arange(-pos, window_length - pos, dtype=float) + + if use == "conv": + # Reverse so that result can be used in a convolution. + x = x[::-1] + + order = np.arange(polyorder + 1).reshape(-1, 1) + A = x ** order + + # y determines which order derivative is returned. + y = np.zeros(polyorder + 1) + # The coefficient assigned to y[deriv] scales the result to take into + # account the order of the derivative and the sample spacing. + y[deriv] = float_factorial(deriv) / (delta ** deriv) + + # Find the least-squares solution of A*c = y + coeffs, _, _, _ = lstsq(A, y) + + return coeffs + + +def _polyder(p, m): + """Differentiate polynomials represented with coefficients. + + p must be a 1-D or 2-D array. In the 2-D case, each column gives + the coefficients of a polynomial; the first row holds the coefficients + associated with the highest power. m must be a nonnegative integer. + (numpy.polyder doesn't handle the 2-D case.) 
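+
+ For instance, differentiating the coefficients of ``3*x**2 + 2*x + 1`` once
+ (a minimal illustration; `_polyder` is private, so import it from this
+ module):
+
+ >>> import numpy as np
+ >>> from scipy.signal._savitzky_golay import _polyder
+ >>> _polyder(np.array([3.0, 2.0, 1.0]), 1)
+ array([6., 2.])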
+ """ + + if m == 0: + result = p + else: + n = len(p) + if n <= m: + result = np.zeros_like(p[:1, ...]) + else: + dp = p[:-m].copy() + for k in range(m): + rng = np.arange(n - k - 1, m - k - 1, -1) + dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1)) + result = dp + return result + + +def _fit_edge(x, window_start, window_stop, interp_start, interp_stop, + axis, polyorder, deriv, delta, y): + """ + Given an N-d array `x` and the specification of a slice of `x` from + `window_start` to `window_stop` along `axis`, create an interpolating + polynomial of each 1-D slice, and evaluate that polynomial in the slice + from `interp_start` to `interp_stop`. Put the result into the + corresponding slice of `y`. + """ + + # Get the edge into a (window_length, -1) array. + x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis) + if axis == 0 or axis == -x.ndim: + xx_edge = x_edge + swapped = False + else: + xx_edge = x_edge.swapaxes(axis, 0) + swapped = True + xx_edge = xx_edge.reshape(xx_edge.shape[0], -1) + + # Fit the edges. poly_coeffs has shape (polyorder + 1, -1), + # where '-1' is the same as in xx_edge. + poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start), + xx_edge, polyorder) + + if deriv > 0: + poly_coeffs = _polyder(poly_coeffs, deriv) + + # Compute the interpolated values for the edge. + i = np.arange(interp_start - window_start, interp_stop - window_start) + values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv) + + # Now put the values into the appropriate slice of y. + # First reshape values to match y. + shp = list(y.shape) + shp[0], shp[axis] = shp[axis], shp[0] + values = values.reshape(interp_stop - interp_start, *shp[1:]) + if swapped: + values = values.swapaxes(0, axis) + # Get a view of the data to be replaced by values. + y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis) + y_edge[...] = values + + +def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y): + """ + Use polynomial interpolation of x at the low and high ends of the axis + to fill in the halflen values in y. + + This function just calls _fit_edge twice, once for each end of the axis. + """ + halflen = window_length // 2 + _fit_edge(x, 0, window_length, 0, halflen, axis, + polyorder, deriv, delta, y) + n = x.shape[axis] + _fit_edge(x, n - window_length, n, n - halflen, n, axis, + polyorder, deriv, delta, y) + + +def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0, + axis=-1, mode='interp', cval=0.0): + """ Apply a Savitzky-Golay filter to an array. + + This is a 1-D filter. If `x` has dimension greater than 1, `axis` + determines the axis along which the filter is applied. + + Parameters + ---------- + x : array_like + The data to be filtered. If `x` is not a single or double precision + floating point array, it will be converted to type ``numpy.float64`` + before filtering. + window_length : int + The length of the filter window (i.e., the number of coefficients). + If `mode` is 'interp', `window_length` must be less than or equal + to the size of `x`. + polyorder : int + The order of the polynomial used to fit the samples. + `polyorder` must be less than `window_length`. + deriv : int, optional + The order of the derivative to compute. This must be a + nonnegative integer. The default is 0, which means to filter + the data without differentiating. + delta : float, optional + The spacing of the samples to which the filter will be applied. + This is only used if deriv > 0. Default is 1.0. 
+ axis : int, optional + The axis of the array `x` along which the filter is to be applied. + Default is -1. + mode : str, optional + Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This + determines the type of extension to use for the padded signal to + which the filter is applied. When `mode` is 'constant', the padding + value is given by `cval`. See the Notes for more details on 'mirror', + 'constant', 'wrap', and 'nearest'. + When the 'interp' mode is selected (the default), no extension + is used. Instead, a degree `polyorder` polynomial is fit to the + last `window_length` values of the edges, and this polynomial is + used to evaluate the last `window_length // 2` output values. + cval : scalar, optional + Value to fill past the edges of the input if `mode` is 'constant'. + Default is 0.0. + + Returns + ------- + y : ndarray, same shape as `x` + The filtered data. + + See Also + -------- + savgol_coeffs + + Notes + ----- + Details on the `mode` options: + + 'mirror': + Repeats the values at the edges in reverse order. The value + closest to the edge is not included. + 'nearest': + The extension contains the nearest input value. + 'constant': + The extension contains the value given by the `cval` argument. + 'wrap': + The extension contains the values from the other end of the array. + + For example, if the input is [1, 2, 3, 4, 5, 6, 7, 8], and + `window_length` is 7, the following shows the extended data for + the various `mode` options (assuming `cval` is 0):: + + mode | Ext | Input | Ext + -----------+---------+------------------------+--------- + 'mirror' | 4 3 2 | 1 2 3 4 5 6 7 8 | 7 6 5 + 'nearest' | 1 1 1 | 1 2 3 4 5 6 7 8 | 8 8 8 + 'constant' | 0 0 0 | 1 2 3 4 5 6 7 8 | 0 0 0 + 'wrap' | 6 7 8 | 1 2 3 4 5 6 7 8 | 1 2 3 + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import savgol_filter + >>> np.set_printoptions(precision=2) # For compact display. + >>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9]) + + Filter with a window length of 5 and a degree 2 polynomial. Use + the defaults for all other parameters. + + >>> savgol_filter(x, 5, 2) + array([1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1. , 4. , 9. ]) + + Note that the last five values in x are samples of a parabola, so + when mode='interp' (the default) is used with polyorder=2, the last + three values are unchanged. Compare that to, for example, + `mode='nearest'`: + + >>> savgol_filter(x, 5, 2, mode='nearest') + array([1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1. , 4.6 , 7.97]) + + """ + if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]: + raise ValueError("mode must be 'mirror', 'constant', 'nearest' " + "'wrap' or 'interp'.") + + x = np.asarray(x) + # Ensure that x is either single or double precision floating point. + if x.dtype != np.float64 and x.dtype != np.float32: + x = x.astype(np.float64) + + coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta) + + if mode == "interp": + if window_length > x.shape[axis]: + raise ValueError("If mode is 'interp', window_length must be less " + "than or equal to the size of x.") + + # Do not pad. Instead, for the elements within `window_length // 2` + # of the ends of the sequence, use the polynomial that is fitted to + # the last `window_length` elements. + y = convolve1d(x, coeffs, axis=axis, mode="constant") + _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y) + else: + # Any mode other than 'interp' is passed on to ndimage.convolve1d. 
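+ # Note that `cval` is only honored by ndimage when ``mode == 'constant'``.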
+ y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval) + + return y diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_short_time_fft.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_short_time_fft.py new file mode 100644 index 0000000000000000000000000000000000000000..bd52e67839bd2e0f39f82e42a4290e121a4a1443 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_short_time_fft.py @@ -0,0 +1,1710 @@ +"""Implementation of an FFT-based Short-time Fourier Transform. """ + +# Implementation Notes for this file (as of 2023-07) +# -------------------------------------------------- +# * MyPy version 1.1.1 does not seem to support decorated property methods +# properly. Hence, applying ``@property`` to methods decorated with `@cache`` +# (as tried with the ``lower_border_end`` method) causes a mypy error when +# accessing it as an index (e.g., ``SFT.lower_border_end[0]``). +# * Since the method `stft` and `istft` have identical names as the legacy +# functions in the signal module, referencing them as HTML link in the +# docstrings has to be done by an explicit `~ShortTimeFFT.stft` instead of an +# ambiguous `stft` (The ``~`` hides the class / module name). +# * The HTML documentation currently renders each method/property on a separate +# page without reference to the parent class. Thus, a link to `ShortTimeFFT` +# was added to the "See Also" section of each method/property. These links +# can be removed, when SciPy updates ``pydata-sphinx-theme`` to >= 0.13.3 +# (currently 0.9). Consult Issue 18512 and PR 16660 for further details. +# + +# Provides typing union operator ``|`` in Python 3.9: +# Linter does not allow to import ``Generator`` from ``typing`` module: +from collections.abc import Generator, Callable +from functools import cache, lru_cache, partial +from typing import get_args, Literal + +import numpy as np + +import scipy.fft as fft_lib +from scipy.signal import detrend +from scipy.signal.windows import get_window + +__all__ = ['ShortTimeFFT'] + + +#: Allowed values for parameter `padding` of method `ShortTimeFFT.stft()`: +PAD_TYPE = Literal['zeros', 'edge', 'even', 'odd'] + +#: Allowed values for property `ShortTimeFFT.fft_mode`: +FFT_MODE_TYPE = Literal['twosided', 'centered', 'onesided', 'onesided2X'] + + +def _calc_dual_canonical_window(win: np.ndarray, hop: int) -> np.ndarray: + """Calculate canonical dual window for 1d window `win` and a time step + of `hop` samples. + + A ``ValueError`` is raised, if the inversion fails. + + This is a separate function not a method, since it is also used in the + class method ``ShortTimeFFT.from_dual()``. + """ + if hop > len(win): + raise ValueError(f"{hop=} is larger than window length of {len(win)}" + + " => STFT not invertible!") + if issubclass(win.dtype.type, np.integer): + raise ValueError("Parameter 'win' cannot be of integer type, but " + + f"{win.dtype=} => STFT not invertible!") + # The calculation of `relative_resolution` does not work for ints. + # Furthermore, `win / DD` casts the integers away, thus an implicit + # cast is avoided, which can always cause confusion when using 32-Bit + # floats. 
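+
+ # The loop below accumulates DD[k] = sum_m |win[k + m*hop]|**2, i.e. the
+ # hop-periodized squared window; the canonical dual is then win[k] / DD[k].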
+ + w2 = win.real**2 + win.imag**2 # win*win.conj() does not ensure w2 is real + DD = w2.copy() + for k_ in range(hop, len(win), hop): + DD[k_:] += w2[:-k_] + DD[:-k_] += w2[k_:] + + # check DD > 0: + relative_resolution = np.finfo(win.dtype).resolution * max(DD) + if not np.all(DD >= relative_resolution): + raise ValueError("Short-time Fourier Transform not invertible!") + + return win / DD + + +# noinspection PyShadowingNames +class ShortTimeFFT: + r"""Provide a parametrized discrete Short-time Fourier transform (stft) + and its inverse (istft). + + .. currentmodule:: scipy.signal.ShortTimeFFT + + The `~ShortTimeFFT.stft` calculates sequential FFTs by sliding a + window (`win`) over an input signal by `hop` increments. It can be used to + quantify the change of the spectrum over time. + + The `~ShortTimeFFT.stft` is represented by a complex-valued matrix S[q,p] + where the p-th column represents an FFT with the window centered at the + time t[p] = p * `delta_t` = p * `hop` * `T` where `T` is the sampling + interval of the input signal. The q-th row represents the values at the + frequency f[q] = q * `delta_f` with `delta_f` = 1 / (`mfft` * `T`) being + the bin width of the FFT. + + The inverse STFT `~ShortTimeFFT.istft` is calculated by reversing the steps + of the STFT: Take the IFFT of the p-th slice of S[q,p] and multiply the + result with the so-called dual window (see `dual_win`). Shift the result by + p * `delta_t` and add the result to previous shifted results to reconstruct + the signal. If only the dual window is known and the STFT is invertible, + `from_dual` can be used to instantiate this class. + + Due to the convention of time t = 0 being at the first sample of the input + signal, the STFT values typically have negative time slots. Hence, + negative indexes like `p_min` or `k_min` do not indicate counting + backwards from an array's end like in standard Python indexing but being + left of t = 0. + + More detailed information can be found in the :ref:`tutorial_stft` section + of the :ref:`user_guide`. + + Note that all parameters of the initializer, except `scale_to` (which uses + `scaling`) have identical named attributes. + + Parameters + ---------- + win : np.ndarray + The window must be a real- or complex-valued 1d array. + hop : int + The increment in samples, by which the window is shifted in each step. + fs : float + Sampling frequency of input signal and window. Its relation to the + sampling interval `T` is ``T = 1 / fs``. + fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' + Mode of FFT to be used (default 'onesided'). + See property `fft_mode` for details. + mfft: int | None + Length of the FFT used, if a zero padded FFT is desired. + If ``None`` (default), the length of the window `win` is used. + dual_win : np.ndarray | None + The dual window of `win`. If set to ``None``, it is calculated if + needed. + scale_to : 'magnitude', 'psd' | None + If not ``None`` (default) the window function is scaled, so each STFT + column represents either a 'magnitude' or a power spectral density + ('psd') spectrum. This parameter sets the property `scaling` to the + same value. See method `scale_to` for details. + phase_shift : int | None + If set, add a linear phase `phase_shift` / `mfft` * `f` to each + frequency `f`. The default value 0 ensures that there is no phase shift + on the zeroth slice (in which t=0 is centered). See property + `phase_shift` for more details. 
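+
+ The time and frequency increments described above can be checked directly on
+ an instance (the Hann window and the numbers below are arbitrary
+ placeholders):
+
+ >>> from scipy.signal import ShortTimeFFT
+ >>> from scipy.signal.windows import hann
+ >>> SFT = ShortTimeFFT(hann(32), hop=8, fs=128., mfft=64)
+ >>> SFT.delta_t, SFT.delta_f
+ (0.0625, 2.0)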
+ + Examples + -------- + The following example shows the magnitude of the STFT of a sine with + varying frequency :math:`f_i(t)` (marked by a red dashed line in the plot): + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import ShortTimeFFT + >>> from scipy.signal.windows import gaussian + ... + >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal + >>> t_x = np.arange(N) * T_x # time indexes for signal + >>> f_i = 1 * np.arctan((t_x - t_x[N // 2]) / 2) + 5 # varying frequency + >>> x = np.sin(2*np.pi*np.cumsum(f_i)*T_x) # the signal + + The utilized Gaussian window is 50 samples or 2.5 s long. The parameter + ``mfft=200`` in `ShortTimeFFT` causes the spectrum to be oversampled + by a factor of 4: + + >>> g_std = 8 # standard deviation for Gaussian window in samples + >>> w = gaussian(50, std=g_std, sym=True) # symmetric Gaussian window + >>> SFT = ShortTimeFFT(w, hop=10, fs=1/T_x, mfft=200, scale_to='magnitude') + >>> Sx = SFT.stft(x) # perform the STFT + + In the plot, the time extent of the signal `x` is marked by vertical dashed + lines. Note that the SFT produces values outside the time range of `x`. The + shaded areas on the left and the right indicate border effects caused + by the window slices in that area not fully being inside time range of + `x`: + + >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit + >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot + >>> ax1.set_title(rf"STFT ({SFT.m_num*SFT.T:g}$\,s$ Gaussian window, " + + ... rf"$\sigma_t={g_std*SFT.T}\,$s)") + >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " + + ... rf"$\Delta t = {SFT.delta_t:g}\,$s)", + ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " + + ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)", + ... xlim=(t_lo, t_hi)) + ... + >>> im1 = ax1.imshow(abs(Sx), origin='lower', aspect='auto', + ... extent=SFT.extent(N), cmap='viridis') + >>> ax1.plot(t_x, f_i, 'r--', alpha=.5, label='$f_i(t)$') + >>> fig1.colorbar(im1, label="Magnitude $|S_x(t, f)|$") + ... + >>> # Shade areas where window slices stick out to the side: + >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T), + ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]: + ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.2) + >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line: + ... ax1.axvline(t_, color='y', linestyle='--', alpha=0.5) + >>> ax1.legend() + >>> fig1.tight_layout() + >>> plt.show() + + Reconstructing the signal with the `~ShortTimeFFT.istft` is + straightforward, but note that the length of `x1` should be specified, + since the SFT length increases in `hop` steps: + + >>> SFT.invertible # check if invertible + True + >>> x1 = SFT.istft(Sx, k1=N) + >>> np.allclose(x, x1) + True + + It is possible to calculate the SFT of signal parts: + + >>> p_q = SFT.nearest_k_p(N // 2) + >>> Sx0 = SFT.stft(x[:p_q]) + >>> Sx1 = SFT.stft(x[p_q:]) + + When assembling sequential STFT parts together, the overlap needs to be + considered: + + >>> p0_ub = SFT.upper_border_begin(p_q)[1] - SFT.p_min + >>> p1_le = SFT.lower_border_end[1] - SFT.p_min + >>> Sx01 = np.hstack((Sx0[:, :p0_ub], + ... Sx0[:, p0_ub:] + Sx1[:, :p1_le], + ... 
Sx1[:, p1_le:])) + >>> np.allclose(Sx01, Sx) # Compare with SFT of complete signal + True + + It is also possible to calculate the `itsft` for signal parts: + + >>> y_p = SFT.istft(Sx, N//3, N//2) + >>> np.allclose(y_p, x[N//3:N//2]) + True + + """ + # immutable attributes (only have getters but no setters): + _win: np.ndarray # window + _dual_win: np.ndarray | None = None # canonical dual window + _hop: int # Step of STFT in number of samples + + # mutable attributes: + _fs: float # sampling frequency of input signal and window + _fft_mode: FFT_MODE_TYPE = 'onesided' # Mode of FFT to use + _mfft: int # length of FFT used - defaults to len(win) + _scaling: Literal['magnitude', 'psd'] | None = None # Scaling of _win + _phase_shift: int | None # amount to shift phase of FFT in samples + + # attributes for caching calculated values: + _fac_mag: float | None = None + _fac_psd: float | None = None + _lower_border_end: tuple[int, int] | None = None + + def __init__(self, win: np.ndarray, hop: int, fs: float, *, + fft_mode: FFT_MODE_TYPE = 'onesided', + mfft: int | None = None, + dual_win: np.ndarray | None = None, + scale_to: Literal['magnitude', 'psd'] | None = None, + phase_shift: int | None = 0): + if not (win.ndim == 1 and win.size > 0): + raise ValueError(f"Parameter win must be 1d, but {win.shape=}!") + if not all(np.isfinite(win)): + raise ValueError("Parameter win must have finite entries!") + if not (hop >= 1 and isinstance(hop, int)): + raise ValueError(f"Parameter {hop=} is not an integer >= 1!") + self._win, self._hop, self.fs = win, hop, fs + + self.mfft = len(win) if mfft is None else mfft + + if dual_win is not None: + if dual_win.shape != win.shape: + raise ValueError(f"{dual_win.shape=} must equal {win.shape=}!") + if not all(np.isfinite(dual_win)): + raise ValueError("Parameter dual_win must be a finite array!") + self._dual_win = dual_win # needs to be set before scaling + + if scale_to is not None: # needs to be set before fft_mode + self.scale_to(scale_to) + + self.fft_mode, self.phase_shift = fft_mode, phase_shift + + @classmethod + def from_dual(cls, dual_win: np.ndarray, hop: int, fs: float, *, + fft_mode: FFT_MODE_TYPE = 'onesided', + mfft: int | None = None, + scale_to: Literal['magnitude', 'psd'] | None = None, + phase_shift: int | None = 0): + r"""Instantiate a `ShortTimeFFT` by only providing a dual window. + + If an STFT is invertible, it is possible to calculate the window `win` + from a given dual window `dual_win`. All other parameters have the + same meaning as in the initializer of `ShortTimeFFT`. + + As explained in the :ref:`tutorial_stft` section of the + :ref:`user_guide`, an invertible STFT can be interpreted as series + expansion of time-shifted and frequency modulated dual windows. E.g., + the series coefficient S[q,p] belongs to the term, which shifted + `dual_win` by p * `delta_t` and multiplied it by + exp( 2 * j * pi * t * q * `delta_f`). + + + Examples + -------- + The following example discusses decomposing a signal into time- and + frequency-shifted Gaussians. A Gaussian with standard deviation of + one made up of 51 samples will be used: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import ShortTimeFFT + >>> from scipy.signal.windows import gaussian + ... + >>> T, N = 0.1, 51 + >>> d_win = gaussian(N, std=1/T, sym=True) # symmetric Gaussian window + >>> t = T * (np.arange(N) - N//2) + ... 
+ >>> fg1, ax1 = plt.subplots() + >>> ax1.set_title(r"Dual Window: Gaussian with $\sigma_t=1$") + >>> ax1.set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", + ... xlim=(t[0], t[-1]), ylim=(0, 1.1*max(d_win))) + >>> ax1.plot(t, d_win, 'C0-') + + The following plot with the overlap of 41, 11 and 2 samples show how + the `hop` interval affects the shape of the window `win`: + + >>> fig2, axx = plt.subplots(3, 1, sharex='all') + ... + >>> axx[0].set_title(r"Windows for hop$\in\{10, 40, 49\}$") + >>> for c_, h_ in enumerate([10, 40, 49]): + ... SFT = ShortTimeFFT.from_dual(d_win, h_, 1/T) + ... axx[c_].plot(t + h_ * T, SFT.win, 'k--', alpha=.3, label=None) + ... axx[c_].plot(t - h_ * T, SFT.win, 'k:', alpha=.3, label=None) + ... axx[c_].plot(t, SFT.win, f'C{c_+1}', + ... label=r"$\Delta t=%0.1f\,$s" % SFT.delta_t) + ... axx[c_].set_ylim(0, 1.1*max(SFT.win)) + ... axx[c_].legend(loc='center') + >>> axx[-1].set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", + ... xlim=(t[0], t[-1])) + >>> plt.show() + + Beside the window `win` centered at t = 0 the previous (t = -`delta_t`) + and following window (t = `delta_t`) are depicted. It can be seen that + for small `hop` intervals, the window is compact and smooth, having a + good time-frequency concentration in the STFT. For the large `hop` + interval of 4.9 s, the window has small values around t = 0, which are + not covered by the overlap of the adjacent windows, which could lead to + numeric inaccuracies. Furthermore, the peaky shape at the beginning and + the end of the window points to a higher bandwidth, resulting in a + poorer time-frequency resolution of the STFT. + Hence, the choice of the `hop` interval will be a compromise between + a time-frequency resolution and memory requirements demanded by small + `hop` sizes. + + See Also + -------- + from_window: Create instance by wrapping `get_window`. + ShortTimeFFT: Create instance using standard initializer. + """ + win = _calc_dual_canonical_window(dual_win, hop) + return cls(win=win, hop=hop, fs=fs, fft_mode=fft_mode, mfft=mfft, + dual_win=dual_win, scale_to=scale_to, + phase_shift=phase_shift) + + @classmethod + def from_window(cls, win_param: str | tuple | float, + fs: float, nperseg: int, noverlap: int, *, + symmetric_win: bool = False, + fft_mode: FFT_MODE_TYPE = 'onesided', + mfft: int | None = None, + scale_to: Literal['magnitude', 'psd'] | None = None, + phase_shift: int | None = 0): + """Instantiate `ShortTimeFFT` by using `get_window`. + + The method `get_window` is used to create a window of length + `nperseg`. The parameter names `noverlap`, and `nperseg` are used here, + since they more inline with other classical STFT libraries. + + Parameters + ---------- + win_param: Union[str, tuple, float], + Parameters passed to `get_window`. For windows with no parameters, + it may be a string (e.g., ``'hann'``), for parametrized windows a + tuple, (e.g., ``('gaussian', 2.)``) or a single float specifying + the shape parameter of a kaiser window (i.e. ``4.`` and + ``('kaiser', 4.)`` are equal. See `get_window` for more details. + fs : float + Sampling frequency of input signal. Its relation to the + sampling interval `T` is ``T = 1 / fs``. + nperseg: int + Window length in samples, which corresponds to the `m_num`. + noverlap: int + Window overlap in samples. It relates to the `hop` increment by + ``hop = npsereg - noverlap``. + symmetric_win: bool + If ``True`` then a symmetric window is generated, else a periodic + window is generated (default). 
Though symmetric windows seem for + most applications to be more sensible, the default of a periodic + windows was chosen to correspond to the default of `get_window`. + fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' + Mode of FFT to be used (default 'onesided'). + See property `fft_mode` for details. + mfft: int | None + Length of the FFT used, if a zero padded FFT is desired. + If ``None`` (default), the length of the window `win` is used. + scale_to : 'magnitude', 'psd' | None + If not ``None`` (default) the window function is scaled, so each + STFT column represents either a 'magnitude' or a power spectral + density ('psd') spectrum. This parameter sets the property + `scaling` to the same value. See method `scale_to` for details. + phase_shift : int | None + If set, add a linear phase `phase_shift` / `mfft` * `f` to each + frequency `f`. The default value 0 ensures that there is no phase + shift on the zeroth slice (in which t=0 is centered). See property + `phase_shift` for more details. + + Examples + -------- + The following instances ``SFT0`` and ``SFT1`` are equivalent: + + >>> from scipy.signal import ShortTimeFFT, get_window + >>> nperseg = 9 # window length + >>> w = get_window(('gaussian', 2.), nperseg) + >>> fs = 128 # sampling frequency + >>> hop = 3 # increment of STFT time slice + >>> SFT0 = ShortTimeFFT(w, hop, fs=fs) + >>> SFT1 = ShortTimeFFT.from_window(('gaussian', 2.), fs, nperseg, + ... noverlap=nperseg-hop) + + See Also + -------- + scipy.signal.get_window: Return a window of a given length and type. + from_dual: Create instance using dual window. + ShortTimeFFT: Create instance using standard initializer. + """ + win = get_window(win_param, nperseg, fftbins=not symmetric_win) + return cls(win, hop=nperseg-noverlap, fs=fs, fft_mode=fft_mode, + mfft=mfft, scale_to=scale_to, phase_shift=phase_shift) + + @property + def win(self) -> np.ndarray: + """Window function as real- or complex-valued 1d array. + + This attribute is read only, since `dual_win` depends on it. + + See Also + -------- + dual_win: Canonical dual window. + m_num: Number of samples in window `win`. + m_num_mid: Center index of window `win`. + mfft: Length of input for the FFT used - may be larger than `m_num`. + hop: ime increment in signal samples for sliding window. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + return self._win + + @property + def hop(self) -> int: + """Time increment in signal samples for sliding window. + + This attribute is read only, since `dual_win` depends on it. + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + m_num: Number of samples in window `win`. + m_num_mid: Center index of window `win`. + mfft: Length of input for the FFT used - may be larger than `m_num`. + T: Sampling interval of input signal and of the window. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + return self._hop + + @property + def T(self) -> float: + """Sampling interval of input signal and of the window. + + A ``ValueError`` is raised if it is set to a non-positive value. + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. + fs: Sampling frequency (being ``1/T``) + t: Times of STFT for an input signal with `n` samples. + ShortTimeFFT: Class this property belongs to. 
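+
+ Examples
+ --------
+ A short sketch (the window and parameter values are arbitrary) confirming
+ that `T` is just the reciprocal of `fs`:
+
+ >>> from scipy.signal import ShortTimeFFT
+ >>> from scipy.signal.windows import hann
+ >>> SFT = ShortTimeFFT(hann(8), hop=4, fs=200.)
+ >>> SFT.T, 1 / SFT.fs
+ (0.005, 0.005)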
+ """ + return 1 / self._fs + + @T.setter + def T(self, v: float): + """Sampling interval of input signal and of the window. + + A ``ValueError`` is raised if it is set to a non-positive value. + """ + if not (v > 0): + raise ValueError(f"Sampling interval T={v} must be positive!") + self._fs = 1 / v + + @property + def fs(self) -> float: + """Sampling frequency of input signal and of the window. + + The sampling frequency is the inverse of the sampling interval `T`. + A ``ValueError`` is raised if it is set to a non-positive value. + + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. + T: Sampling interval of input signal and of the window (``1/fs``). + ShortTimeFFT: Class this property belongs to. + """ + return self._fs + + @fs.setter + def fs(self, v: float): + """Sampling frequency of input signal and of the window. + + The sampling frequency is the inverse of the sampling interval `T`. + A ``ValueError`` is raised if it is set to a non-positive value. + """ + if not (v > 0): + raise ValueError(f"Sampling frequency fs={v} must be positive!") + self._fs = v + + @property + def fft_mode(self) -> FFT_MODE_TYPE: + """Mode of utilized FFT ('twosided', 'centered', 'onesided' or + 'onesided2X'). + + It can have the following values: + + 'twosided': + Two-sided FFT, where values for the negative frequencies are in + upper half of the array. Corresponds to :func:`~scipy.fft.fft()`. + 'centered': + Two-sided FFT with the values being ordered along monotonically + increasing frequencies. Corresponds to applying + :func:`~scipy.fft.fftshift()` to :func:`~scipy.fft.fft()`. + 'onesided': + Calculates only values for non-negative frequency values. + Corresponds to :func:`~scipy.fft.rfft()`. + 'onesided2X': + Like `onesided`, but the non-zero frequencies are doubled if + `scaling` is set to 'magnitude' or multiplied by ``sqrt(2)`` if + set to 'psd'. If `scaling` is ``None``, setting `fft_mode` to + `onesided2X` is not allowed. + If the FFT length `mfft` is even, the last FFT value is not paired, + and thus it is not scaled. + + Note that `onesided` and `onesided2X` do not work for complex-valued signals or + complex-valued windows. Furthermore, the frequency values can be obtained by + reading the `f` property, and the number of samples by accessing the `f_pts` + property. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f: Frequencies values of the STFT. + f_pts: Width of the frequency bins of the STFT. + onesided_fft: True if a one-sided FFT is used. + scaling: Normalization applied to the window function + ShortTimeFFT: Class this property belongs to. + """ + return self._fft_mode + + @fft_mode.setter + def fft_mode(self, t: FFT_MODE_TYPE): + """Set mode of FFT. + + Allowed values are 'twosided', 'centered', 'onesided', 'onesided2X'. + See the property `fft_mode` for more details. + """ + if t not in (fft_mode_types := get_args(FFT_MODE_TYPE)): + raise ValueError(f"fft_mode='{t}' not in {fft_mode_types}!") + + if t in {'onesided', 'onesided2X'} and np.iscomplexobj(self.win): + raise ValueError(f"One-sided spectra, i.e., fft_mode='{t}', " + + "are not allowed for complex-valued windows!") + + if t == 'onesided2X' and self.scaling is None: + raise ValueError(f"For scaling is None, fft_mode='{t}' is invalid!" 
+ "Do scale_to('psd') or scale_to('magnitude')!") + self._fft_mode = t + + @property + def mfft(self) -> int: + """Length of input for the FFT used - may be larger than window + length `m_num`. + + If not set, `mfft` defaults to the window length `m_num`. + + See Also + -------- + f_pts: Number of points along the frequency axis. + f: Frequencies values of the STFT. + m_num: Number of samples in window `win`. + ShortTimeFFT: Class this property belongs to. + """ + return self._mfft + + @mfft.setter + def mfft(self, n_: int): + """Setter for the length of FFT utilized. + + See the property `mfft` for further details. + """ + if not (n_ >= self.m_num): + raise ValueError(f"Attribute mfft={n_} needs to be at least the " + + f"window length m_num={self.m_num}!") + self._mfft = n_ + + @property + def scaling(self) -> Literal['magnitude', 'psd'] | None: + """Normalization applied to the window function + ('magnitude', 'psd' or ``None``). + + If not ``None``, the FFTs can be either interpreted as a magnitude or + a power spectral density spectrum. + + The window function can be scaled by calling the `scale_to` method, + or it is set by the initializer parameter ``scale_to``. + + See Also + -------- + fac_magnitude: Scaling factor for to a magnitude spectrum. + fac_psd: Scaling factor for to a power spectral density spectrum. + fft_mode: Mode of utilized FFT + scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. + ShortTimeFFT: Class this property belongs to. + """ + return self._scaling + + def scale_to(self, scaling: Literal['magnitude', 'psd']): + """Scale window to obtain 'magnitude' or 'psd' scaling for the STFT. + + The window of a 'magnitude' spectrum has an integral of one, i.e., unit + area for non-negative windows. This ensures that absolute the values of + spectrum does not change if the length of the window changes (given + the input signal is stationary). + + To represent the power spectral density ('psd') for varying length + windows the area of the absolute square of the window needs to be + unity. + + The `scaling` property shows the current scaling. The properties + `fac_magnitude` and `fac_psd` show the scaling factors required to + scale the STFT values to a magnitude or a psd spectrum. + + This method is called, if the initializer parameter `scale_to` is set. + + See Also + -------- + fac_magnitude: Scaling factor for to a magnitude spectrum. + fac_psd: Scaling factor for to a power spectral density spectrum. + fft_mode: Mode of utilized FFT + scaling: Normalization applied to the window function. + ShortTimeFFT: Class this method belongs to. + """ + if scaling not in (scaling_values := {'magnitude', 'psd'}): + raise ValueError(f"{scaling=} not in {scaling_values}!") + if self._scaling == scaling: # do nothing + return + + s_fac = self.fac_psd if scaling == 'psd' else self.fac_magnitude + self._win = self._win * s_fac + if self._dual_win is not None: + self._dual_win = self._dual_win / s_fac + self._fac_mag, self._fac_psd = None, None # reset scaling factors + self._scaling = scaling + + @property + def phase_shift(self) -> int | None: + """If set, add linear phase `phase_shift` / `mfft` * `f` to each FFT + slice of frequency `f`. + + Shifting (more precisely `rolling`) an `mfft`-point FFT input by + `phase_shift` samples results in a multiplication of the output by + ``np.exp(2j*np.pi*q*phase_shift/mfft)`` at the frequency q * `delta_f`. + + The default value 0 ensures that there is no phase shift on the + zeroth slice (in which t=0 is centered). 
+ No phase shift (``phase_shift is None``) is equivalent to + ``phase_shift = -mfft//2``. In this case slices are not shifted + before calculating the FFT. + + The absolute value of `phase_shift` is limited to be less than `mfft`. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f: Frequencies values of the STFT. + mfft: Length of input for the FFT used + ShortTimeFFT: Class this property belongs to. + """ + return self._phase_shift + + @phase_shift.setter + def phase_shift(self, v: int | None): + """The absolute value of the phase shift needs to be less than mfft + samples. + + See the `phase_shift` getter method for more details. + """ + if v is None: + self._phase_shift = v + return + if not isinstance(v, int): + raise ValueError(f"phase_shift={v} has the unit samples. Hence " + + "it needs to be an int or it may be None!") + if not (-self.mfft < v < self.mfft): + raise ValueError("-mfft < phase_shift < mfft does not hold " + + f"for mfft={self.mfft}, phase_shift={v}!") + self._phase_shift = v + + def _x_slices(self, x: np.ndarray, k_off: int, p0: int, p1: int, + padding: PAD_TYPE) -> Generator[np.ndarray, None, None]: + """Generate signal slices along last axis of `x`. + + This method is only used by `stft_detrend`. The parameters are + described in `~ShortTimeFFT.stft`. + """ + if padding not in (padding_types := get_args(PAD_TYPE)): + raise ValueError(f"Parameter {padding=} not in {padding_types}!") + pad_kws: dict[str, dict] = { # possible keywords to pass to np.pad: + 'zeros': dict(mode='constant', constant_values=(0, 0)), + 'edge': dict(mode='edge'), + 'even': dict(mode='reflect', reflect_type='even'), + 'odd': dict(mode='reflect', reflect_type='odd'), + } # typing of pad_kws is needed to make mypy happy + + n, n1 = x.shape[-1], (p1 - p0) * self.hop + k0 = p0 * self.hop - self.m_num_mid + k_off # start sample + k1 = k0 + n1 + self.m_num # end sample + + i0, i1 = max(k0, 0), min(k1, n) # indexes to shorten x + # dimensions for padding x: + pad_width = [(0, 0)] * (x.ndim-1) + [(-min(k0, 0), max(k1 - n, 0))] + + x1 = np.pad(x[..., i0:i1], pad_width, **pad_kws[padding]) + for k_ in range(0, n1, self.hop): + yield x1[..., k_:k_ + self.m_num] + + def stft(self, x: np.ndarray, p0: int | None = None, + p1: int | None = None, *, k_offset: int = 0, + padding: PAD_TYPE = 'zeros', axis: int = -1) \ + -> np.ndarray: + """Perform the short-time Fourier transform. + + A two-dimensional matrix with ``p1-p0`` columns is calculated. + The `f_pts` rows represent value at the frequencies `f`. The q-th + column of the windowed FFT with the window `win` is centered at t[q]. + The columns represent the values at the frequencies `f`. + + Parameters + ---------- + x + The input signal as real or complex valued array. For complex values, the + property `fft_mode` must be set to 'twosided' or 'centered'. + p0 + The first element of the range of slices to calculate. If ``None`` + then it is set to :attr:`p_min`, which is the smallest possible + slice. + p1 + The end of the array. If ``None`` then `p_max(n)` is used. + k_offset + Index of first sample (t = 0) in `x`. + padding + Kind of values which are added, when the sliding window sticks out + on either the lower or upper end of the input `x`. Zeros are added + if the default 'zeros' is set. For 'edge' either the first or the + last value of `x` is used. 'even' pads by reflecting the + signal on the first or last sample and 'odd' additionally + multiplies it with -1. + axis + The axis of `x` over which to compute the STFT. 
+ If not given, the last axis is used. + + Returns + ------- + S + A complex array is returned with the dimension always being larger + by one than of `x`. The last axis always represent the time slices + of the STFT. `axis` defines the frequency axis (default second to + last). E.g., for a one-dimensional `x`, a complex 2d array is + returned, with axis 0 representing frequency and axis 1 the time + slices. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + delta_t: Time increment of STFT + f: Frequencies values of the STFT. + invertible: Check if STFT is invertible. + :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. + p_range: Determine and validate slice index range. + stft_detrend: STFT with detrended segments. + t: Times of STFT for an input signal with `n` samples. + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + return self.stft_detrend(x, None, p0, p1, k_offset=k_offset, + padding=padding, axis=axis) + + def stft_detrend(self, x: np.ndarray, + detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None, # noqa: E501 + p0: int | None = None, p1: int | None = None, *, + k_offset: int = 0, padding: PAD_TYPE = 'zeros', + axis: int = -1) \ + -> np.ndarray: + """Short-time Fourier transform with a trend being subtracted from each + segment beforehand. + + If `detr` is set to 'constant', the mean is subtracted, if set to + "linear", the linear trend is removed. This is achieved by calling + :func:`scipy.signal.detrend`. If `detr` is a function, `detr` is + applied to each segment. + All other parameters have the same meaning as in `~ShortTimeFFT.stft`. + + Note that due to the detrending, the original signal cannot be + reconstructed by the `~ShortTimeFFT.istft`. + + See Also + -------- + invertible: Check if STFT is invertible. + :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. + :meth:`~ShortTimeFFT.stft`: Short-time Fourier transform + (without detrending). + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + if self.onesided_fft and np.iscomplexobj(x): + raise ValueError(f"Complex-valued `x` not allowed for {self.fft_mode=}'! 
" + "Set property `fft_mode` to 'twosided' or 'centered'.") + if isinstance(detr, str): + detr = partial(detrend, type=detr) + elif not (detr is None or callable(detr)): + raise ValueError(f"Parameter {detr=} is not a str, function or " + + "None!") + n = x.shape[axis] + if not (n >= (m2p := self.m_num-self.m_num_mid)): + e_str = f'{len(x)=}' if x.ndim == 1 else f'of {axis=} of {x.shape}' + raise ValueError(f"{e_str} must be >= ceil(m_num/2) = {m2p}!") + + if x.ndim > 1: # motivated by the NumPy broadcasting mechanisms: + x = np.moveaxis(x, axis, -1) + # determine slice index range: + p0, p1 = self.p_range(n, p0, p1) + S_shape_1d = (self.f_pts, p1 - p0) + S_shape = x.shape[:-1] + S_shape_1d if x.ndim > 1 else S_shape_1d + S = np.zeros(S_shape, dtype=complex) + for p_, x_ in enumerate(self._x_slices(x, k_offset, p0, p1, padding)): + if detr is not None: + x_ = detr(x_) + S[..., :, p_] = self._fft_func(x_ * self.win.conj()) + if x.ndim > 1: + return np.moveaxis(S, -2, axis if axis >= 0 else axis-1) + return S + + def spectrogram(self, x: np.ndarray, y: np.ndarray | None = None, + detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None = None, # noqa: E501 + *, + p0: int | None = None, p1: int | None = None, + k_offset: int = 0, padding: PAD_TYPE = 'zeros', + axis: int = -1) \ + -> np.ndarray: + r"""Calculate spectrogram or cross-spectrogram. + + The spectrogram is the absolute square of the STFT, i.e., it is + ``abs(S[q,p])**2`` for given ``S[q,p]`` and thus is always + non-negative. + For two STFTs ``Sx[q,p], Sy[q,p]``, the cross-spectrogram is defined + as ``Sx[q,p] * np.conj(Sy[q,p])`` and is complex-valued. + This is a convenience function for calling `~ShortTimeFFT.stft` / + `stft_detrend`, hence all parameters are discussed there. If `y` is not + ``None`` it needs to have the same shape as `x`. + + Examples + -------- + The following example shows the spectrogram of a square wave with + varying frequency :math:`f_i(t)` (marked by a green dashed line in the + plot) sampled with 20 Hz: + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.signal import square, ShortTimeFFT + >>> from scipy.signal.windows import gaussian + ... + >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal + >>> t_x = np.arange(N) * T_x # time indexes for signal + >>> f_i = 5e-3*(t_x - t_x[N // 3])**2 + 1 # varying frequency + >>> x = square(2*np.pi*np.cumsum(f_i)*T_x) # the signal + + The utilized Gaussian window is 50 samples or 2.5 s long. The + parameter ``mfft=800`` (oversampling factor 16) and the `hop` interval + of 2 in `ShortTimeFFT` was chosen to produce a sufficient number of + points: + + >>> g_std = 12 # standard deviation for Gaussian window in samples + >>> win = gaussian(50, std=g_std, sym=True) # symmetric Gaussian wind. + >>> SFT = ShortTimeFFT(win, hop=2, fs=1/T_x, mfft=800, scale_to='psd') + >>> Sx2 = SFT.spectrogram(x) # calculate absolute square of STFT + + The plot's colormap is logarithmically scaled as the power spectral + density is in dB. The time extent of the signal `x` is marked by + vertical dashed lines and the shaded areas mark the presence of border + effects: + + >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit + >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot + >>> ax1.set_title(rf"Spectrogram ({SFT.m_num*SFT.T:g}$\,s$ Gaussian " + + ... rf"window, $\sigma_t={g_std*SFT.T:g}\,$s)") + >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " + + ... 
rf"$\Delta t = {SFT.delta_t:g}\,$s)", + ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " + + ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)", + ... xlim=(t_lo, t_hi)) + >>> Sx_dB = 10 * np.log10(np.fmax(Sx2, 1e-4)) # limit range to -40 dB + >>> im1 = ax1.imshow(Sx_dB, origin='lower', aspect='auto', + ... extent=SFT.extent(N), cmap='magma') + >>> ax1.plot(t_x, f_i, 'g--', alpha=.5, label='$f_i(t)$') + >>> fig1.colorbar(im1, label='Power Spectral Density ' + + ... r"$20\,\log_{10}|S_x(t, f)|$ in dB") + ... + >>> # Shade areas where window slices stick out to the side: + >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T), + ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]: + ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.3) + >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line + ... ax1.axvline(t_, color='c', linestyle='--', alpha=0.5) + >>> ax1.legend() + >>> fig1.tight_layout() + >>> plt.show() + + The logarithmic scaling reveals the odd harmonics of the square wave, + which are reflected at the Nyquist frequency of 10 Hz. This aliasing + is also the main source of the noise artifacts in the plot. + + + See Also + -------- + :meth:`~ShortTimeFFT.stft`: Perform the short-time Fourier transform. + stft_detrend: STFT with a trend subtracted from each segment. + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + Sx = self.stft_detrend(x, detr, p0, p1, k_offset=k_offset, + padding=padding, axis=axis) + if y is None or y is x: # do spectrogram: + return Sx.real**2 + Sx.imag**2 + # Cross-spectrogram: + Sy = self.stft_detrend(y, detr, p0, p1, k_offset=k_offset, + padding=padding, axis=axis) + return Sx * Sy.conj() + + @property + def dual_win(self) -> np.ndarray: + """Canonical dual window. + + A STFT can be interpreted as the input signal being expressed as a + weighted sum of modulated and time-shifted dual windows. Note that for + a given window there exist many dual windows. The canonical window is + the one with the minimal energy (i.e., :math:`L_2` norm). + + `dual_win` has same length as `win`, namely `m_num` samples. + + If the dual window cannot be calculated a ``ValueError`` is raised. + This attribute is read only and calculated lazily. + + See Also + -------- + dual_win: Canonical dual window. + m_num: Number of samples in window `win`. + win: Window function as real- or complex-valued 1d array. + ShortTimeFFT: Class this property belongs to. + """ + if self._dual_win is None: + self._dual_win = _calc_dual_canonical_window(self.win, self.hop) + return self._dual_win + + @property + def invertible(self) -> bool: + """Check if STFT is invertible. + + This is achieved by trying to calculate the canonical dual window. + + See Also + -------- + :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. + m_num: Number of samples in window `win` and `dual_win`. + dual_win: Canonical dual window. + win: Window for STFT. + ShortTimeFFT: Class this property belongs to. + """ + try: + return len(self.dual_win) > 0 # call self.dual_win() + except ValueError: + return False + + def istft(self, S: np.ndarray, k0: int = 0, k1: int | None = None, *, + f_axis: int = -2, t_axis: int = -1) \ + -> np.ndarray: + """Inverse short-time Fourier transform. + + It returns an array of dimension ``S.ndim - 1`` which is real + if `onesided_fft` is set, else complex. If the STFT is not + `invertible`, or the parameters are out of bounds a ``ValueError`` is + raised. 
+ + Parameters + ---------- + S + A complex valued array where `f_axis` denotes the frequency + values and the `t-axis` dimension the temporal values of the + STFT values. + k0, k1 + The start and the end index of the reconstructed signal. The + default (``k0 = 0``, ``k1 = None``) assumes that the maximum length + signal should be reconstructed. + f_axis, t_axis + The axes in `S` denoting the frequency and the time dimension. + + Notes + ----- + It is required that `S` has `f_pts` entries along the `f_axis`. For + the `t_axis` it is assumed that the first entry corresponds to + `p_min` * `delta_t` (being <= 0). The length of `t_axis` needs to be + compatible with `k1`. I.e., ``S.shape[t_axis] >= self.p_max(k1)`` must + hold, if `k1` is not ``None``. Else `k1` is set to `k_max` with:: + + q_max = S.shape[t_range] + self.p_min + k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid + + The :ref:`tutorial_stft` section of the :ref:`user_guide` discussed the + slicing behavior by means of an example. + + See Also + -------- + invertible: Check if STFT is invertible. + :meth:`~ShortTimeFFT.stft`: Perform Short-time Fourier transform. + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. + """ + if f_axis == t_axis: + raise ValueError(f"{f_axis=} may not be equal to {t_axis=}!") + if S.shape[f_axis] != self.f_pts: + raise ValueError(f"{S.shape[f_axis]=} must be equal to " + + f"{self.f_pts=} ({S.shape=})!") + n_min = self.m_num-self.m_num_mid # minimum signal length + if not (S.shape[t_axis] >= (q_num := self.p_num(n_min))): + raise ValueError(f"{S.shape[t_axis]=} needs to have at least " + + f"{q_num} slices ({S.shape=})!") + if t_axis != S.ndim - 1 or f_axis != S.ndim - 2: + t_axis = S.ndim + t_axis if t_axis < 0 else t_axis + f_axis = S.ndim + f_axis if f_axis < 0 else f_axis + S = np.moveaxis(S, (f_axis, t_axis), (-2, -1)) + + q_max = S.shape[-1] + self.p_min + k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid + + k1 = k_max if k1 is None else k1 + if not (self.k_min <= k0 < k1 <= k_max): + raise ValueError(f"({self.k_min=}) <= ({k0=}) < ({k1=}) <= " + + f"({k_max=}) is false!") + if not (num_pts := k1 - k0) >= n_min: + raise ValueError(f"({k1=}) - ({k0=}) = {num_pts} has to be at " + + f"least the half the window length {n_min}!") + + q0 = (k0 // self.hop + self.p_min if k0 >= 0 else # p_min always <= 0 + k0 // self.hop) + q1 = min(self.p_max(k1), q_max) + k_q0, k_q1 = self.nearest_k_p(k0), self.nearest_k_p(k1, left=False) + n_pts = k_q1 - k_q0 + self.m_num - self.m_num_mid + x = np.zeros(S.shape[:-2] + (n_pts,), + dtype=float if self.onesided_fft else complex) + for q_ in range(q0, q1): + xs = self._ifft_func(S[..., :, q_ - self.p_min]) * self.dual_win + i0 = q_ * self.hop - self.m_num_mid + i1 = min(i0 + self.m_num, n_pts+k0) + j0, j1 = 0, i1 - i0 + if i0 < k0: # xs sticks out to the left on x: + j0 += k0 - i0 + i0 = k0 + x[..., i0-k0:i1-k0] += xs[..., j0:j1] + x = x[..., :k1-k0] + if x.ndim > 1: + x = np.moveaxis(x, -1, f_axis if f_axis < x.ndim else t_axis) + return x + + @property + def fac_magnitude(self) -> float: + """Factor to multiply the STFT values by to scale each frequency slice + to a magnitude spectrum. + + It is 1 if attribute ``scaling == 'magnitude'``. + The window can be scaled to a magnitude spectrum by using the method + `scale_to`. + + See Also + -------- + fac_psd: Scaling factor for to a power spectral density spectrum. + scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. 
+        scaling: Normalization applied to the window function.
+        ShortTimeFFT: Class this property belongs to.
+        """
+        if self.scaling == 'magnitude':
+            return 1
+        if self._fac_mag is None:
+            self._fac_mag = 1 / abs(sum(self.win))
+        return self._fac_mag
+
+    @property
+    def fac_psd(self) -> float:
+        """Factor to multiply the STFT values by to scale each frequency slice
+        to a power spectral density (PSD).
+
+        It is 1 if attribute ``scaling == 'psd'``.
+        The window can be scaled to a psd spectrum by using the method
+        `scale_to`.
+
+        See Also
+        --------
+        fac_magnitude: Scaling factor for a magnitude spectrum.
+        scale_to: Scale window to obtain 'magnitude' or 'psd' scaling.
+        scaling: Normalization applied to the window function.
+        ShortTimeFFT: Class this property belongs to.
+        """
+        if self.scaling == 'psd':
+            return 1
+        if self._fac_psd is None:
+            self._fac_psd = 1 / np.sqrt(
+                sum(self.win.real**2+self.win.imag**2) / self.T)
+        return self._fac_psd
+
+    @property
+    def m_num(self) -> int:
+        """Number of samples in window `win`.
+
+        Note that the FFT can be oversampled by zero-padding. This is achieved
+        by setting the `mfft` property.
+
+        See Also
+        --------
+        m_num_mid: Center index of window `win`.
+        mfft: Length of input for the FFT used - may be larger than `m_num`.
+        hop: Time increment in signal samples for sliding window.
+        win: Window function as real- or complex-valued 1d array.
+        ShortTimeFFT: Class this property belongs to.
+        """
+        return len(self.win)
+
+    @property
+    def m_num_mid(self) -> int:
+        """Center index of window `win`.
+
+        For odd `m_num`, ``(m_num - 1) / 2`` is returned and
+        for even `m_num` (per definition) ``m_num / 2`` is returned.
+
+        See Also
+        --------
+        m_num: Number of samples in window `win`.
+        mfft: Length of input for the FFT used - may be larger than `m_num`.
+        hop: Time increment in signal samples for sliding window.
+        win: Window function as real- or complex-valued 1d array.
+        ShortTimeFFT: Class this property belongs to.
+        """
+        return self.m_num // 2
+
+    @cache
+    def _pre_padding(self) -> tuple[int, int]:
+        """Smallest signal index and slice index due to padding.
+
+        Since, per convention, the sample index n and the slice index q are
+        zero for time t = 0, the returned values are negative or zero.
+        """
+        w2 = self.win.real**2 + self.win.imag**2
+        # move window to the left until the overlap with t >= 0 vanishes:
+        n0 = -self.m_num_mid
+        for q_, n_ in enumerate(range(n0, n0-self.m_num-1, -self.hop)):
+            n_next = n_ - self.hop
+            if n_next + self.m_num <= 0 or all(w2[n_next:] == 0):
+                return n_, -q_
+        raise RuntimeError("This code line should not have been reached!")
+        # If this case is reached, it probably means the first slice should be
+        # returned, i.e.: return n0, 0
+
+    @property
+    def k_min(self) -> int:
+        """The smallest possible signal index of the STFT.
+
+        `k_min` is the index of the left-most non-zero value of the lowest
+        slice `p_min`. Since the zeroth slice is centered over the zeroth
+        sample of the input signal, `k_min` is never positive.
+        A detailed example is provided in the :ref:`tutorial_stft_sliding_win`
+        section of the :ref:`user_guide`.
+
+        See Also
+        --------
+        k_max: First sample index after signal end not touched by a time slice.
+        lower_border_end: Where pre-padding effects end.
+        p_min: The smallest possible slice index.
+        p_max: Index of first non-overlapping upper time slice.
+        p_num: Number of time slices, i.e., `p_max` - `p_min`.
+        p_range: Determine and validate slice index range.
+        upper_border_begin: Where post-padding effects start.
+ ShortTimeFFT: Class this property belongs to. + """ + return self._pre_padding()[0] + + @property + def p_min(self) -> int: + """The smallest possible slice index. + + `p_min` is the index of the left-most slice, where the window still + sticks into the signal, i.e., has non-zero part for t >= 0. + `k_min` is the smallest index where the window function of the slice + `p_min` is non-zero. + + Since, per convention the zeroth slice is centered at t=0, + `p_min` <= 0 always holds. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this property belongs to. + """ + return self._pre_padding()[1] + + @lru_cache(maxsize=256) + def _post_padding(self, n: int) -> tuple[int, int]: + """Largest signal index and slice index due to padding.""" + w2 = self.win.real**2 + self.win.imag**2 + # move window to the right until the overlap for t < t[n] vanishes: + q1 = n // self.hop # last slice index with t[p1] <= t[n] + k1 = q1 * self.hop - self.m_num_mid + for q_, k_ in enumerate(range(k1, n+self.m_num, self.hop), start=q1): + n_next = k_ + self.hop + if n_next >= n or all(w2[:n-n_next] == 0): + return k_ + self.m_num, q_ + 1 + raise RuntimeError("This is code line should not have been reached!") + # If this case is reached, it probably means the last slice should be + # returned, i.e.: return k1 + self.m_num - self.m_num_mid, q1 + 1 + + def k_max(self, n: int) -> int: + """First sample index after signal end not touched by a time slice. + + `k_max` - 1 is the largest sample index of the slice `p_max` for a + given input signal of `n` samples. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + p_min: The smallest possible slice index. + p_max: Index of first non-overlapping upper time slice. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this method belongs to. + """ + return self._post_padding(n)[0] + + def p_max(self, n: int) -> int: + """Index of first non-overlapping upper time slice for `n` sample + input. + + Note that center point t[p_max] = (p_max(n)-1) * `delta_t` is typically + larger than last time index t[n-1] == (`n`-1) * `T`. The upper border + of samples indexes covered by the window slices is given by `k_max`. + Furthermore, `p_max` does not denote the number of slices `p_num` since + `p_min` is typically less than zero. + A detailed example is provided in the :ref:`tutorial_stft_sliding_win` + section of the :ref:`user_guide`. + + See Also + -------- + k_min: The smallest possible signal index. + k_max: First sample index after signal end not touched by a time slice. + p_min: The smallest possible slice index. + p_num: Number of time slices, i.e., `p_max` - `p_min`. + p_range: Determine and validate slice index range. + ShortTimeFFT: Class this method belongs to. + """ + return self._post_padding(n)[1] + + def p_num(self, n: int) -> int: + """Number of time slices for an input signal with `n` samples. + + It is given by `p_num` = `p_max` - `p_min` with `p_min` typically + being negative. 
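+
+        For instance, for a (hypothetical) rectangular 8-sample window with a
+        hop interval of 2 samples and a 16-sample input signal:
+
+        >>> import numpy as np
+        >>> from scipy.signal import ShortTimeFFT
+        >>> SFT = ShortTimeFFT(np.ones(8), hop=2, fs=1)
+        >>> SFT.p_min, SFT.p_max(16), SFT.p_num(16)
+        (-1, 10, 11)
+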
+        A detailed example is provided in the :ref:`tutorial_stft_sliding_win`
+        section of the :ref:`user_guide`.
+
+        See Also
+        --------
+        k_min: The smallest possible signal index.
+        k_max: First sample index after signal end not touched by a time slice.
+        lower_border_end: Where pre-padding effects end.
+        p_min: The smallest possible slice index.
+        p_max: Index of first non-overlapping upper time slice.
+        p_range: Determine and validate slice index range.
+        upper_border_begin: Where post-padding effects start.
+        ShortTimeFFT: Class this method belongs to.
+        """
+        return self.p_max(n) - self.p_min
+
+    @property
+    def lower_border_end(self) -> tuple[int, int]:
+        """First signal index and first slice index unaffected by pre-padding.
+
+        Describes the point where the window does not stick out to the left
+        of the signal domain.
+        A detailed example is provided in the :ref:`tutorial_stft_sliding_win`
+        section of the :ref:`user_guide`.
+
+        See Also
+        --------
+        k_min: The smallest possible signal index.
+        k_max: First sample index after signal end not touched by a time slice.
+        lower_border_end: Where pre-padding effects end.
+        p_min: The smallest possible slice index.
+        p_max: Index of first non-overlapping upper time slice.
+        p_num: Number of time slices, i.e., `p_max` - `p_min`.
+        p_range: Determine and validate slice index range.
+        upper_border_begin: Where post-padding effects start.
+        ShortTimeFFT: Class this property belongs to.
+        """
+        # not using @cache decorator due to MyPy limitations
+        if self._lower_border_end is not None:
+            return self._lower_border_end
+
+        # first non-zero element in self.win:
+        m0 = np.flatnonzero(self.win.real**2 + self.win.imag**2)[0]
+
+        # move window to the right until it does not stick out to the left:
+        k0 = -self.m_num_mid + m0
+        for q_, k_ in enumerate(range(k0, self.hop + 1, self.hop)):
+            if k_ + self.hop >= 0:  # next entry does not stick out anymore
+                self._lower_border_end = (k_ + self.m_num, q_ + 1)
+                return self._lower_border_end
+        self._lower_border_end = (0, max(self.p_min, 0))  # ends at first slice
+        return self._lower_border_end
+
+    @lru_cache(maxsize=256)
+    def upper_border_begin(self, n: int) -> tuple[int, int]:
+        """First signal index and first slice index affected by post-padding.
+
+        Describes the point where the window begins to stick out to the right
+        of the signal domain.
+        A detailed example is given in the :ref:`tutorial_stft_sliding_win`
+        section of the :ref:`user_guide`.
+
+        See Also
+        --------
+        k_min: The smallest possible signal index.
+        k_max: First sample index after signal end not touched by a time slice.
+        lower_border_end: Where pre-padding effects end.
+        p_min: The smallest possible slice index.
+        p_max: Index of first non-overlapping upper time slice.
+        p_num: Number of time slices, i.e., `p_max` - `p_min`.
+        p_range: Determine and validate slice index range.
+        ShortTimeFFT: Class this method belongs to.
+        """
+        w2 = self.win.real**2 + self.win.imag**2
+        q2 = n // self.hop + 1  # first t[q] >= t[n]
+        q1 = max((n-self.m_num) // self.hop - 1, -1)
+        # move window left until it does not stick out to the right:
+        for q_ in range(q2, q1, -1):
+            k_ = q_ * self.hop + (self.m_num - self.m_num_mid)
+            if k_ < n or all(w2[n-k_:] == 0):
+                return (q_ + 1) * self.hop - self.m_num_mid, q_ + 1
+        return 0, 0  # border starts at first slice
+
+    @property
+    def delta_t(self) -> float:
+        """Time increment of STFT.
+
+        The time increment `delta_t` = `T` * `hop` represents the sample
+        increment `hop` converted to time based on the sampling interval `T`.
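+
+        For instance (with an arbitrary 8-sample window as a placeholder),
+        ``hop=2`` at a sampling rate of ``fs=10`` Hz gives a time increment
+        of 0.2 s:
+
+        >>> import numpy as np
+        >>> from scipy.signal import ShortTimeFFT
+        >>> ShortTimeFFT(np.hanning(8), hop=2, fs=10).delta_t
+        0.2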
+
+        See Also
+        --------
+        delta_f: Width of the frequency bins of the STFT.
+        hop: Hop size in signal samples for sliding window.
+        t: Times of STFT for an input signal with `n` samples.
+        T: Sampling interval of input signal and window `win`.
+        ShortTimeFFT: Class this property belongs to.
+        """
+        return self.T * self.hop
+
+    def p_range(self, n: int, p0: int | None = None,
+                p1: int | None = None) -> tuple[int, int]:
+        """Determine and validate slice index range.
+
+        Parameters
+        ----------
+        n : int
+            Number of samples of input signal, assuming t[0] = 0.
+        p0 : int | None
+            First slice index. If 0 then the first slice is centered at t = 0.
+            If ``None`` then `p_min` is used. Note that p0 may be < 0 if
+            slices are left of t = 0.
+        p1 : int | None
+            End of interval (last value is p1-1).
+            If ``None`` then `p_max(n)` is used.
+
+        Returns
+        -------
+        p0_ : int
+            The first slice index.
+        p1_ : int
+            End of interval (last value is p1-1).
+
+        Notes
+        -----
+        A ``ValueError`` is raised if ``p_min <= p0 < p1 <= p_max(n)`` does not
+        hold.
+
+        See Also
+        --------
+        k_min: The smallest possible signal index.
+        k_max: First sample index after signal end not touched by a time slice.
+        lower_border_end: Where pre-padding effects end.
+        p_min: The smallest possible slice index.
+        p_max: Index of first non-overlapping upper time slice.
+        p_num: Number of time slices, i.e., `p_max` - `p_min`.
+        upper_border_begin: Where post-padding effects start.
+        ShortTimeFFT: Class this method belongs to.
+        """
+        p_max = self.p_max(n)  # shorthand
+        p0_ = self.p_min if p0 is None else p0
+        p1_ = p_max if p1 is None else p1
+        if not (self.p_min <= p0_ < p1_ <= p_max):
+            raise ValueError(f"Invalid Parameter {p0=}, {p1=}, i.e., " +
+                             f"{self.p_min=} <= p0 < p1 <= {p_max=} " +
+                             f"does not hold for signal length {n=}!")
+        return p0_, p1_
+
+    @lru_cache(maxsize=1)
+    def t(self, n: int, p0: int | None = None, p1: int | None = None,
+          k_offset: int = 0) -> np.ndarray:
+        """Times of STFT for an input signal with `n` samples.
+
+        Returns a 1d array with times of the `~ShortTimeFFT.stft` values with
+        the same parametrization. Note that the slices are
+        ``delta_t = hop * T`` time units apart.
+
+        Parameters
+        ----------
+        n
+            Number of samples of the input signal.
+        p0
+            The first element of the range of slices to calculate. If ``None``
+            then it is set to :attr:`p_min`, which is the smallest possible
+            slice.
+        p1
+            The end of the array. If ``None`` then `p_max(n)` is used.
+        k_offset
+            Index of first sample (t = 0) in `x`.
+
+        See Also
+        --------
+        delta_t: Time increment of STFT (``hop*T``)
+        hop: Time increment in signal samples for sliding window.
+        nearest_k_p: Nearest sample index k_p for which t[k_p] == t[p] holds.
+        T: Sampling interval of input signal and of the window (``1/fs``).
+        fs: Sampling frequency (being ``1/T``)
+        ShortTimeFFT: Class this method belongs to.
+        """
+        p0, p1 = self.p_range(n, p0, p1)
+        return np.arange(p0, p1) * self.delta_t + k_offset * self.T
+
+    def nearest_k_p(self, k: int, left: bool = True) -> int:
+        """Return nearest sample index k_p for which t[k_p] == t[p] holds.
+
+        The nearest next smaller time sample p (where t[p] is the center
+        position of the window of the p-th slice) is p_k = k // `hop`.
+        If `hop` is a divisor of `k` then `k` is returned.
+        If `left` is set then p_k * `hop` is returned, else (p_k+1) * `hop`.
+
+        This method can be used to slice an input signal into chunks for
+        calculating the STFT and iSTFT incrementally.
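+
+        For instance, with a (hypothetical) hop interval of 5 samples, the
+        sample index 12 is mapped to 10, or to 15 when ``left`` is ``False``:
+
+        >>> import numpy as np
+        >>> from scipy.signal import ShortTimeFFT
+        >>> SFT = ShortTimeFFT(np.ones(6), hop=5, fs=1)
+        >>> SFT.nearest_k_p(12), SFT.nearest_k_p(12, left=False)
+        (10, 15)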
+ + See Also + -------- + delta_t: Time increment of STFT (``hop*T``) + hop: Time increment in signal samples for sliding window. + T: Sampling interval of input signal and of the window (``1/fs``). + fs: Sampling frequency (being ``1/T``) + t: Times of STFT for an input signal with `n` samples. + ShortTimeFFT: Class this method belongs to. + """ + p_q, remainder = divmod(k, self.hop) + if remainder == 0: + return k + return p_q * self.hop if left else (p_q + 1) * self.hop + + @property + def delta_f(self) -> float: + """Width of the frequency bins of the STFT. + + Return the frequency interval `delta_f` = 1 / (`mfft` * `T`). + + See Also + -------- + delta_t: Time increment of STFT. + f_pts: Number of points along the frequency axis. + f: Frequencies values of the STFT. + mfft: Length of the input for FFT used. + T: Sampling interval. + t: Times of STFT for an input signal with `n` samples. + ShortTimeFFT: Class this property belongs to. + """ + return 1 / (self.mfft * self.T) + + @property + def f_pts(self) -> int: + """Number of points along the frequency axis. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f: Frequencies values of the STFT. + mfft: Length of the input for FFT used. + ShortTimeFFT: Class this property belongs to. + """ + return self.mfft // 2 + 1 if self.onesided_fft else self.mfft + + @property + def onesided_fft(self) -> bool: + """Return True if a one-sided FFT is used. + + Returns ``True`` if `fft_mode` is either 'onesided' or 'onesided2X'. + + See Also + -------- + fft_mode: Utilized FFT ('twosided', 'centered', 'onesided' or + 'onesided2X') + ShortTimeFFT: Class this property belongs to. + """ + return self.fft_mode in {'onesided', 'onesided2X'} + + @property + def f(self) -> np.ndarray: + """Frequencies values of the STFT. + + A 1d array of length `f_pts` with `delta_f` spaced entries is returned. + + See Also + -------- + delta_f: Width of the frequency bins of the STFT. + f_pts: Number of points along the frequency axis. + mfft: Length of the input for FFT used. + ShortTimeFFT: Class this property belongs to. + """ + if self.fft_mode in {'onesided', 'onesided2X'}: + return fft_lib.rfftfreq(self.mfft, self.T) + elif self.fft_mode == 'twosided': + return fft_lib.fftfreq(self.mfft, self.T) + elif self.fft_mode == 'centered': + return fft_lib.fftshift(fft_lib.fftfreq(self.mfft, self.T)) + # This should never happen but makes the Linters happy: + fft_modes = get_args(FFT_MODE_TYPE) + raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!") + + def _fft_func(self, x: np.ndarray) -> np.ndarray: + """FFT based on the `fft_mode`, `mfft`, `scaling` and `phase_shift` + attributes. + + For multidimensional arrays the transformation is carried out on the + last axis. 
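+        The transformed array has `f_pts` points along the last axis, i.e.,
+        `mfft` points for the two-sided modes and ``mfft // 2 + 1`` points for
+        the one-sided modes.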
+ """ + if self.phase_shift is not None: + if x.shape[-1] < self.mfft: # zero pad if needed + z_shape = list(x.shape) + z_shape[-1] = self.mfft - x.shape[-1] + x = np.hstack((x, np.zeros(z_shape, dtype=x.dtype))) + p_s = (self.phase_shift + self.m_num_mid) % self.m_num + x = np.roll(x, -p_s, axis=-1) + + if self.fft_mode == 'twosided': + return fft_lib.fft(x, n=self.mfft, axis=-1) + if self.fft_mode == 'centered': + return fft_lib.fftshift(fft_lib.fft(x, self.mfft, axis=-1), axes=-1) + if self.fft_mode == 'onesided': + return fft_lib.rfft(x, n=self.mfft, axis=-1) + if self.fft_mode == 'onesided2X': + X = fft_lib.rfft(x, n=self.mfft, axis=-1) + # Either squared magnitude (psd) or magnitude is doubled: + fac = np.sqrt(2) if self.scaling == 'psd' else 2 + # For even input length, the last entry is unpaired: + X[..., 1: -1 if self.mfft % 2 == 0 else None] *= fac + return X + # This should never happen but makes the Linter happy: + fft_modes = get_args(FFT_MODE_TYPE) + raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!") + + def _ifft_func(self, X: np.ndarray) -> np.ndarray: + """Inverse to `_fft_func`. + + Returned is an array of length `m_num`. If the FFT is `onesided` + then a float array is returned else a complex array is returned. + For multidimensional arrays the transformation is carried out on the + last axis. + """ + if self.fft_mode == 'twosided': + x = fft_lib.ifft(X, n=self.mfft, axis=-1) + elif self.fft_mode == 'centered': + x = fft_lib.ifft(fft_lib.ifftshift(X, axes=-1), n=self.mfft, axis=-1) + elif self.fft_mode == 'onesided': + x = fft_lib.irfft(X, n=self.mfft, axis=-1) + elif self.fft_mode == 'onesided2X': + Xc = X.copy() # we do not want to modify function parameters + fac = np.sqrt(2) if self.scaling == 'psd' else 2 + # For even length X the last value is not paired with a negative + # value on the two-sided FFT: + q1 = -1 if self.mfft % 2 == 0 else None + Xc[..., 1:q1] /= fac + x = fft_lib.irfft(Xc, n=self.mfft, axis=-1) + else: # This should never happen but makes the Linter happy: + error_str = f"{self.fft_mode=} not in {get_args(FFT_MODE_TYPE)}!" + raise RuntimeError(error_str) + + if self.phase_shift is None: + return x[..., :self.m_num] + p_s = (self.phase_shift + self.m_num_mid) % self.m_num + return np.roll(x, p_s, axis=-1)[..., :self.m_num] + + def extent(self, n: int, axes_seq: Literal['tf', 'ft'] = 'tf', + center_bins: bool = False) -> tuple[float, float, float, float]: + """Return minimum and maximum values time-frequency values. + + A tuple with four floats ``(t0, t1, f0, f1)`` for 'tf' and + ``(f0, f1, t0, t1)`` for 'ft' is returned describing the corners + of the time-frequency domain of the `~ShortTimeFFT.stft`. + That tuple can be passed to `matplotlib.pyplot.imshow` as a parameter + with the same name. + + Parameters + ---------- + n : int + Number of samples in input signal. + axes_seq : {'tf', 'ft'} + Return time extent first and then frequency extent or vice-versa. + center_bins: bool + If set (default ``False``), the values of the time slots and + frequency bins are moved from the side the middle. This is useful, + when plotting the `~ShortTimeFFT.stft` values as step functions, + i.e., with no interpolation. + + See Also + -------- + :func:`matplotlib.pyplot.imshow`: Display data as an image. + :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. 
+ + Examples + -------- + The following two plots illustrate the effect of the parameter `center_bins`: + The grid lines represent the three time and the four frequency values of the + STFT. + The left plot, where ``(t0, t1, f0, f1) = (0, 3, 0, 4)`` is passed as parameter + ``extent`` to `~matplotlib.pyplot.imshow`, shows the standard behavior of the + time and frequency values being at the lower edge of the corrsponding bin. + The right plot, with ``(t0, t1, f0, f1) = (-0.5, 2.5, -0.5, 3.5)``, shows that + the bins are centered over the respective values when passing + ``center_bins=True``. + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.signal import ShortTimeFFT + ... + >>> n, m = 12, 6 + >>> SFT = ShortTimeFFT.from_window('hann', fs=m, nperseg=m, noverlap=0) + >>> Sxx = SFT.stft(np.cos(np.arange(n))) # produces a colorful plot + ... + >>> fig, axx = plt.subplots(1, 2, tight_layout=True, figsize=(6., 4.)) + >>> for ax_, center_bins in zip(axx, (False, True)): + ... ax_.imshow(abs(Sxx), origin='lower', interpolation=None, aspect='equal', + ... cmap='viridis', extent=SFT.extent(n, 'tf', center_bins)) + ... ax_.set_title(f"{center_bins=}") + ... ax_.set_xlabel(f"Time ({SFT.p_num(n)} points, Δt={SFT.delta_t})") + ... ax_.set_ylabel(f"Frequency ({SFT.f_pts} points, Δf={SFT.delta_f})") + ... ax_.set_xticks(SFT.t(n)) # vertical grid line are timestamps + ... ax_.set_yticks(SFT.f) # horizontal grid line are frequency values + ... ax_.grid(True) + >>> plt.show() + + Note that the step-like behavior with the constant colors is caused by passing + ``interpolation=None`` to `~matplotlib.pyplot.imshow`. + """ + if axes_seq not in ('tf', 'ft'): + raise ValueError(f"Parameter {axes_seq=} not in ['tf', 'ft']!") + + if self.onesided_fft: + q0, q1 = 0, self.f_pts + elif self.fft_mode == 'centered': + q0 = -(self.mfft // 2) + q1 = self.mfft // 2 if self.mfft % 2 == 0 else self.mfft // 2 + 1 + else: + raise ValueError(f"Attribute fft_mode={self.fft_mode} must be " + + "in ['centered', 'onesided', 'onesided2X']") + + p0, p1 = self.p_min, self.p_max(n) # shorthand + if center_bins: + t0, t1 = self.delta_t * (p0 - 0.5), self.delta_t * (p1 - 0.5) + f0, f1 = self.delta_f * (q0 - 0.5), self.delta_f * (q1 - 0.5) + else: + t0, t1 = self.delta_t * p0, self.delta_t * p1 + f0, f1 = self.delta_f * q0, self.delta_f * q1 + return (t0, t1, f0, f1) if axes_seq == 'tf' else (f0, f1, t0, t1) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_signaltools.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_signaltools.py new file mode 100644 index 0000000000000000000000000000000000000000..eac86cc07c4d6d69dcfee789414022af22d7156e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_signaltools.py @@ -0,0 +1,4986 @@ +# Author: Travis Oliphant +# 1999 -- 2002 + +from __future__ import annotations # Provides typing union operator `|` in Python 3.9 +import operator +import math +from math import prod as _prod +import timeit +import warnings +from typing import Literal + +from numpy._typing import ArrayLike + +from scipy.spatial import cKDTree +from . 
import _sigtools +from ._ltisys import dlti +from ._upfirdn import upfirdn, _output_len, _upfirdn_modes +from scipy import linalg, fft as sp_fft +from scipy import ndimage +from scipy.fft._helper import _init_nd_shape_and_axes +import numpy as np +from scipy.special import lambertw +from .windows import get_window +from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext +from ._filter_design import cheby1, _validate_sos, zpk2sos +from ._fir_filter_design import firwin +from ._sosfilt import _sosfilt + + +__all__ = ['correlate', 'correlation_lags', 'correlate2d', + 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve', + 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', + 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2', 'envelope', + 'unique_roots', 'invres', 'invresz', 'residue', + 'residuez', 'resample', 'resample_poly', 'detrend', + 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method', + 'filtfilt', 'decimate', 'vectorstrength'] + + +_modedict = {'valid': 0, 'same': 1, 'full': 2} + +_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1, + 'symmetric': 1, 'reflect': 4} + + +def _valfrommode(mode): + try: + return _modedict[mode] + except KeyError as e: + raise ValueError("Acceptable mode flags are 'valid'," + " 'same', or 'full'.") from e + + +def _bvalfromboundary(boundary): + try: + return _boundarydict[boundary] << 2 + except KeyError as e: + raise ValueError("Acceptable boundary flags are 'fill', 'circular' " + "(or 'wrap'), and 'symmetric' (or 'symm').") from e + + +def _inputs_swap_needed(mode, shape1, shape2, axes=None): + """Determine if inputs arrays need to be swapped in `"valid"` mode. + + If in `"valid"` mode, returns whether or not the input arrays need to be + swapped depending on whether `shape1` is at least as large as `shape2` in + every calculated dimension. + + This is important for some of the correlation and convolution + implementations in this module, where the larger array input needs to come + before the smaller array input when operating in this mode. + + Note that if the mode provided is not 'valid', False is immediately + returned. + + """ + if mode != 'valid': + return False + + if not shape1: + return False + + if axes is None: + axes = range(len(shape1)) + + ok1 = all(shape1[i] >= shape2[i] for i in axes) + ok2 = all(shape2[i] >= shape1[i] for i in axes) + + if not (ok1 or ok2): + raise ValueError("For 'valid' mode, one must be at least " + "as large as the other in every dimension") + + return not ok1 + + +def _reject_objects(arr, name): + """Warn if arr.dtype is object or longdouble. + """ + dt = np.asarray(arr).dtype + if not (np.issubdtype(dt, np.integer) + or dt in [np.bool_, np.float16, np.float32, np.float64, + np.complex64, np.complex128] + ): + msg = ( + f"dtype={dt} is not supported by {name} and will raise an error in " + f"SciPy 1.17.0. Supported dtypes are: boolean, integer, `np.float16`," + f"`np.float32`, `np.float64`, `np.complex64`, `np.complex128`." + ) + warnings.warn(msg, category=DeprecationWarning, stacklevel=3) + + +def correlate(in1, in2, mode='full', method='auto'): + r""" + Cross-correlate two N-dimensional arrays. + + Cross-correlate `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. 
+ mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear cross-correlation + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + method : str {'auto', 'direct', 'fft'}, optional + A string indicating which method to use to calculate the correlation. + + ``direct`` + The correlation is determined directly from sums, the definition of + correlation. + ``fft`` + The Fast Fourier Transform is used to perform the correlation more + quickly (only available for numerical arrays.) + ``auto`` + Automatically chooses direct or Fourier method based on an estimate + of which is faster (default). See `convolve` Notes for more detail. + + .. versionadded:: 0.19.0 + + Returns + ------- + correlate : array + An N-dimensional array containing a subset of the discrete linear + cross-correlation of `in1` with `in2`. + + See Also + -------- + choose_conv_method : contains more documentation on `method`. + correlation_lags : calculates the lag / displacement indices array for 1D + cross-correlation. + + Notes + ----- + The correlation z of two d-dimensional arrays x and y is defined as:: + + z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...]) + + This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` + then + + .. math:: + + z[k] = (x * y)(k - N + 1) + = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*} + + for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2` + + where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`, + and :math:`y_m` is 0 when m is outside the range of y. + + ``method='fft'`` only works for numerical arrays as it relies on + `fftconvolve`. In certain cases (i.e., arrays of objects or when + rounding integers can lose precision), ``method='direct'`` is always used. + + When using "same" mode with even-length inputs, the outputs of `correlate` + and `correlate2d` differ: There is a 1-index offset between them. + + Examples + -------- + Implement a matched filter using cross-correlation, to recover a signal + that has passed through a noisy channel. + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128) + >>> sig_noise = sig + rng.standard_normal(len(sig)) + >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128 + + >>> clock = np.arange(64, len(sig), 128) + >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True) + >>> ax_orig.plot(sig) + >>> ax_orig.plot(clock, sig[clock], 'ro') + >>> ax_orig.set_title('Original signal') + >>> ax_noise.plot(sig_noise) + >>> ax_noise.set_title('Signal with noise') + >>> ax_corr.plot(corr) + >>> ax_corr.plot(clock, corr[clock], 'ro') + >>> ax_corr.axhline(0.5, ls=':') + >>> ax_corr.set_title('Cross-correlated with rectangular pulse') + >>> ax_orig.margins(0, 0.1) + >>> fig.tight_layout() + >>> plt.show() + + Compute the cross-correlation of a noisy signal with the original signal. 
+ + >>> x = np.arange(128) / 128 + >>> sig = np.sin(2 * np.pi * x) + >>> sig_noise = sig + rng.standard_normal(len(sig)) + >>> corr = signal.correlate(sig_noise, sig) + >>> lags = signal.correlation_lags(len(sig), len(sig_noise)) + >>> corr /= np.max(corr) + + >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, figsize=(4.8, 4.8)) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('Original signal') + >>> ax_orig.set_xlabel('Sample Number') + >>> ax_noise.plot(sig_noise) + >>> ax_noise.set_title('Signal with noise') + >>> ax_noise.set_xlabel('Sample Number') + >>> ax_corr.plot(lags, corr) + >>> ax_corr.set_title('Cross-correlated signal') + >>> ax_corr.set_xlabel('Lag') + >>> ax_orig.margins(0, 0.1) + >>> ax_noise.margins(0, 0.1) + >>> ax_corr.margins(0, 0.1) + >>> fig.tight_layout() + >>> plt.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + _reject_objects(in1, 'correlate') + _reject_objects(in2, 'correlate') + + if in1.ndim == in2.ndim == 0: + return in1 * in2.conj() + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + + # Don't use _valfrommode, since correlate should not accept numeric modes + try: + val = _modedict[mode] + except KeyError as e: + raise ValueError("Acceptable mode flags are 'valid'," + " 'same', or 'full'.") from e + + # this either calls fftconvolve or this function with method=='direct' + if method in ('fft', 'auto'): + return convolve(in1, _reverse_and_conj(in2), mode, method) + + elif method == 'direct': + # fastpath to faster numpy.correlate for 1d inputs when possible + if _np_conv_ok(in1, in2, mode): + return np.correlate(in1, in2, mode) + + # _correlateND is far slower when in2.size > in1.size, so swap them + # and then undo the effect afterward if mode == 'full'. Also, it fails + # with 'valid' mode if in2 is larger than in1, so swap those, too. + # Don't swap inputs for 'same' mode, since shape of in1 matters. + swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or + _inputs_swap_needed(mode, in1.shape, in2.shape)) + + if swapped_inputs: + in1, in2 = in2, in1 + + if mode == 'valid': + ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)] + out = np.empty(ps, in1.dtype) + + z = _sigtools._correlateND(in1, in2, out, val) + + else: + ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)] + + # zero pad input + in1zpadded = np.zeros(ps, in1.dtype) + sc = tuple(slice(0, i) for i in in1.shape) + in1zpadded[sc] = in1.copy() + + if mode == 'full': + out = np.empty(ps, in1.dtype) + elif mode == 'same': + out = np.empty(in1.shape, in1.dtype) + + z = _sigtools._correlateND(in1zpadded, in2, out, val) + + if swapped_inputs: + # Reverse and conjugate to undo the effect of swapping inputs + z = _reverse_and_conj(z) + + return z + + else: + raise ValueError("Acceptable method flags are 'auto'," + " 'direct', or 'fft'.") + + +def correlation_lags(in1_len, in2_len, mode='full'): + r""" + Calculates the lag / displacement indices array for 1D cross-correlation. + + Parameters + ---------- + in1_len : int + First input size. + in2_len : int + Second input size. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output. + See the documentation `correlate` for more information. + + Returns + ------- + lags : array + Returns an array containing cross-correlation lag/displacement indices. + Indices can be indexed with the np.argmax of the correlation to return + the lag/displacement. + + See Also + -------- + correlate : Compute the N-dimensional cross-correlation. 
+ + Notes + ----- + Cross-correlation for continuous functions :math:`f` and :math:`g` is + defined as: + + .. math:: + + \left ( f\star g \right )\left ( \tau \right ) + \triangleq \int_{t_0}^{t_0 +T} + \overline{f\left ( t \right )}g\left ( t+\tau \right )dt + + Where :math:`\tau` is defined as the displacement, also known as the lag. + + Cross correlation for discrete functions :math:`f` and :math:`g` is + defined as: + + .. math:: + \left ( f\star g \right )\left [ n \right ] + \triangleq \sum_{-\infty}^{\infty} + \overline{f\left [ m \right ]}g\left [ m+n \right ] + + Where :math:`n` is the lag. + + Examples + -------- + Cross-correlation of a signal with its time-delayed self. + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal(1000) + >>> y = np.concatenate([rng.standard_normal(100), x]) + >>> correlation = signal.correlate(x, y, mode="full") + >>> lags = signal.correlation_lags(x.size, y.size, mode="full") + >>> lag = lags[np.argmax(correlation)] + """ + + # calculate lag ranges in different modes of operation + if mode == "full": + # the output is the full discrete linear convolution + # of the inputs. (Default) + lags = np.arange(-in2_len + 1, in1_len) + elif mode == "same": + # the output is the same size as `in1`, centered + # with respect to the 'full' output. + # calculate the full output + lags = np.arange(-in2_len + 1, in1_len) + # determine the midpoint in the full output + mid = lags.size // 2 + # determine lag_bound to be used with respect + # to the midpoint + lag_bound = in1_len // 2 + # calculate lag ranges for even and odd scenarios + if in1_len % 2 == 0: + lags = lags[(mid-lag_bound):(mid+lag_bound)] + else: + lags = lags[(mid-lag_bound):(mid+lag_bound)+1] + elif mode == "valid": + # the output consists only of those elements that do not + # rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + # must be at least as large as the other in every dimension. + + # the lag_bound will be either negative or positive + # this let's us infer how to present the lag range + lag_bound = in1_len - in2_len + if lag_bound >= 0: + lags = np.arange(lag_bound + 1) + else: + lags = np.arange(lag_bound, 1) + else: + raise ValueError(f"Mode {mode} is invalid") + return lags + + +def _centered(arr, newshape): + # Return the center newshape portion of the array. + newshape = np.asarray(newshape) + currshape = np.array(arr.shape) + startind = (currshape - newshape) // 2 + endind = startind + newshape + myslice = [slice(startind[k], endind[k]) for k in range(len(endind))] + return arr[tuple(myslice)] + + +def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False): + """Handle the axes argument for frequency-domain convolution. + + Returns the inputs and axes in a standard form, eliminating redundant axes, + swapping the inputs if necessary, and checking for various potential + errors. + + Parameters + ---------- + in1 : array + First input. + in2 : array + Second input. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output. + See the documentation `fftconvolve` for more information. + axes : list of ints + Axes over which to compute the FFTs. + sorted_axes : bool, optional + If `True`, sort the axes. + Default is `False`, do not sort. + + Returns + ------- + in1 : array + The first input, possible swapped with the second input. + in2 : array + The second input, possible swapped with the first input. + axes : list of ints + Axes over which to compute the FFTs. 
+ + """ + s1 = in1.shape + s2 = in2.shape + noaxes = axes is None + + _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes) + + if not noaxes and not len(axes): + raise ValueError("when provided, axes cannot be empty") + + # Axes of length 1 can rely on broadcasting rules for multiply, + # no fft needed. + axes = [a for a in axes if s1[a] != 1 and s2[a] != 1] + + if sorted_axes: + axes.sort() + + if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1 + for a in range(in1.ndim) if a not in axes): + raise ValueError("incompatible shapes for in1 and in2:" + f" {s1} and {s2}") + + # Check that input sizes are compatible with 'valid' mode. + if _inputs_swap_needed(mode, s1, s2, axes=axes): + # Convolution is commutative; order doesn't have any effect on output. + in1, in2 = in2, in1 + + return in1, in2, axes + + +def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False): + """Convolve two arrays in the frequency domain. + + This function implements only base the FFT-related operations. + Specifically, it converts the signals to the frequency domain, multiplies + them, then converts them back to the time domain. Calculations of axes, + shapes, convolution mode, etc. are implemented in higher level-functions, + such as `fftconvolve` and `oaconvolve`. Those functions should be used + instead of this one. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + axes : array_like of ints + Axes over which to compute the FFTs. + shape : array_like of ints + The sizes of the FFTs. + calc_fast_len : bool, optional + If `True`, set each value of `shape` to the next fast FFT length. + Default is `False`, use `axes` as-is. + + Returns + ------- + out : array + An N-dimensional array containing the discrete linear convolution of + `in1` with `in2`. + + """ + if not len(axes): + return in1 * in2 + + complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c') + + if calc_fast_len: + # Speed up FFT by padding to optimal size. + fshape = [ + sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] + else: + fshape = shape + + if not complex_result: + fft, ifft = sp_fft.rfftn, sp_fft.irfftn + else: + fft, ifft = sp_fft.fftn, sp_fft.ifftn + + sp1 = fft(in1, fshape, axes=axes) + sp2 = fft(in2, fshape, axes=axes) + + ret = ifft(sp1 * sp2, fshape, axes=axes) + + if calc_fast_len: + fslice = tuple([slice(sz) for sz in shape]) + ret = ret[fslice] + + return ret + + +def _apply_conv_mode(ret, s1, s2, mode, axes): + """Calculate the convolution result shape based on the `mode` argument. + + Returns the result sliced to the correct size for the given mode. + + Parameters + ---------- + ret : array + The result array, with the appropriate shape for the 'full' mode. + s1 : list of int + The shape of the first input. + s2 : list of int + The shape of the second input. + mode : str {'full', 'valid', 'same'} + A string indicating the size of the output. + See the documentation `fftconvolve` for more information. + axes : list of ints + Axes over which to compute the convolution. + + Returns + ------- + ret : array + A copy of `res`, sliced to the correct size for the given `mode`. 
+ + """ + if mode == "full": + return ret.copy() + elif mode == "same": + return _centered(ret, s1).copy() + elif mode == "valid": + shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1 + for a in range(ret.ndim)] + return _centered(ret, shape_valid).copy() + else: + raise ValueError("acceptable mode flags are 'valid'," + " 'same', or 'full'") + + +def fftconvolve(in1, in2, mode="full", axes=None): + """Convolve two N-dimensional arrays using FFT. + + Convolve `in1` and `in2` using the fast Fourier transform method, with + the output size determined by the `mode` argument. + + This is generally much faster than `convolve` for large arrays (n > ~500), + but can be slower when only a few output values are needed, and can only + output float arrays (int or object array inputs will be cast to float). + + As of v0.19, `convolve` automatically chooses this method or the direct + method based on an estimation of which is faster. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + axes : int or array_like of ints or None, optional + Axes over which to compute the convolution. + The default is over all axes. + + Returns + ------- + out : array + An N-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + See Also + -------- + convolve : Uses the direct convolution or FFT convolution algorithm + depending on which is faster. + oaconvolve : Uses the overlap-add method to do convolution, which is + generally faster when the input arrays are large and + significantly different in size. + + Examples + -------- + Autocorrelation of white noise is an impulse. + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> sig = rng.standard_normal(1000) + >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full') + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('White noise') + >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr) + >>> ax_mag.set_title('Autocorrelation') + >>> fig.tight_layout() + >>> fig.show() + + Gaussian blur implemented using FFT convolution. Notice the dark borders + around the image, due to the zero-padding beyond its boundaries. + The `convolve2d` function allows for other types of image boundaries, + but is far slower. + + >>> from scipy import datasets + >>> face = datasets.face(gray=True) + >>> kernel = np.outer(signal.windows.gaussian(70, 8), + ... signal.windows.gaussian(70, 8)) + >>> blurred = signal.fftconvolve(face, kernel, mode='same') + + >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1, + ... 
figsize=(6, 15)) + >>> ax_orig.imshow(face, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_kernel.imshow(kernel, cmap='gray') + >>> ax_kernel.set_title('Gaussian kernel') + >>> ax_kernel.set_axis_off() + >>> ax_blurred.imshow(blurred, cmap='gray') + >>> ax_blurred.set_title('Blurred') + >>> ax_blurred.set_axis_off() + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if in1.ndim == in2.ndim == 0: # scalar inputs + return in1 * in2 + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + elif in1.size == 0 or in2.size == 0: # empty arrays + return np.array([]) + + in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, + sorted_axes=False) + + s1 = in1.shape + s2 = in2.shape + + shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 + for i in range(in1.ndim)] + + ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True) + + return _apply_conv_mode(ret, s1, s2, mode, axes) + + +def _calc_oa_lens(s1, s2): + """Calculate the optimal FFT lengths for overlap-add convolution. + + The calculation is done for a single dimension. + + Parameters + ---------- + s1 : int + Size of the dimension for the first array. + s2 : int + Size of the dimension for the second array. + + Returns + ------- + block_size : int + The size of the FFT blocks. + overlap : int + The amount of overlap between two blocks. + in1_step : int + The size of each step for the first array. + in2_step : int + The size of each step for the first array. + + """ + # Set up the arguments for the conventional FFT approach. + fallback = (s1+s2-1, None, s1, s2) + + # Use conventional FFT convolve if sizes are same. + if s1 == s2 or s1 == 1 or s2 == 1: + return fallback + + if s2 > s1: + s1, s2 = s2, s1 + swapped = True + else: + swapped = False + + # There cannot be a useful block size if s2 is more than half of s1. 
+ if s2 >= s1/2: + return fallback + + # Derivation of optimal block length + # For original formula see: + # https://en.wikipedia.org/wiki/Overlap-add_method + # + # Formula: + # K = overlap = s2-1 + # N = block_size + # C = complexity + # e = exponential, exp(1) + # + # C = (N*(log2(N)+1))/(N-K) + # C = (N*log2(2N))/(N-K) + # C = N/(N-K) * log2(2N) + # C1 = N/(N-K) + # C2 = log2(2N) = ln(2N)/ln(2) + # + # dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2 + # dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2)) + # + # dC/dN = dC1/dN*C2 + dC2/dN*C1 + # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K)) + # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K)) + # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2) + # dC/dN = (-K*ln(2N) + (N-K)/(ln(2)*(N-K)^2) + # dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2) + # + # Solve for minimum, where dC/dN = 0 + # 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2) + # 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K + # 0 = N - K*ln(2N) - K + # 0 = N - K*(ln(2N) + 1) + # 0 = N - K*ln(2Ne) + # N = K*ln(2Ne) + # N/K = ln(2Ne) + # + # e^(N/K) = e^ln(2Ne) + # e^(N/K) = 2Ne + # 1/e^(N/K) = 1/(2*N*e) + # e^(N/-K) = 1/(2*N*e) + # e^(N/-K) = K/N*1/(2*K*e) + # N/K*e^(N/-K) = 1/(2*e*K) + # N/-K*e^(N/-K) = -1/(2*e*K) + # + # Using Lambert W function + # https://en.wikipedia.org/wiki/Lambert_W_function + # x = W(y) It is the solution to y = x*e^x + # x = N/-K + # y = -1/(2*e*K) + # + # N/-K = W(-1/(2*e*K)) + # + # N = -K*W(-1/(2*e*K)) + overlap = s2-1 + opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real + block_size = sp_fft.next_fast_len(math.ceil(opt_size)) + + # Use conventional FFT convolve if there is only going to be one block. + if block_size >= s1: + return fallback + + if not swapped: + in1_step = block_size-s2+1 + in2_step = s2 + else: + in1_step = s2 + in2_step = block_size-s2+1 + + return block_size, overlap, in1_step, in2_step + + +def oaconvolve(in1, in2, mode="full", axes=None): + """Convolve two N-dimensional arrays using the overlap-add method. + + Convolve `in1` and `in2` using the overlap-add method, with + the output size determined by the `mode` argument. + + This is generally much faster than `convolve` for large arrays (n > ~500), + and generally much faster than `fftconvolve` when one array is much + larger than the other, but can be slower when only a few output values are + needed or when the arrays are very similar in shape, and can only + output float arrays (int or object array inputs will be cast to float). + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + axes : int or array_like of ints or None, optional + Axes over which to compute the convolution. + The default is over all axes. + + Returns + ------- + out : array + An N-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + See Also + -------- + convolve : Uses the direct convolution or FFT convolution algorithm + depending on which is faster. 
+ fftconvolve : An implementation of convolution using FFT. + + Notes + ----- + .. versionadded:: 1.4.0 + + References + ---------- + .. [1] Wikipedia, "Overlap-add_method". + https://en.wikipedia.org/wiki/Overlap-add_method + .. [2] Richard G. Lyons. Understanding Digital Signal Processing, + Third Edition, 2011. Chapter 13.10. + ISBN 13: 978-0137-02741-5 + + Examples + -------- + Convolve a 100,000 sample signal with a 512-sample filter. + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> sig = rng.standard_normal(100000) + >>> filt = signal.firwin(512, 0.01) + >>> fsig = signal.oaconvolve(sig, filt) + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('White noise') + >>> ax_mag.plot(fsig) + >>> ax_mag.set_title('Filtered noise') + >>> fig.tight_layout() + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if in1.ndim == in2.ndim == 0: # scalar inputs + return in1 * in2 + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + elif in1.size == 0 or in2.size == 0: # empty arrays + return np.array([]) + elif in1.shape == in2.shape: # Equivalent to fftconvolve + return fftconvolve(in1, in2, mode=mode, axes=axes) + + in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, + sorted_axes=True) + + s1 = in1.shape + s2 = in2.shape + + if not axes: + ret = in1 * in2 + return _apply_conv_mode(ret, s1, s2, mode, axes) + + # Calculate this now since in1 is changed later + shape_final = [None if i not in axes else + s1[i] + s2[i] - 1 for i in range(in1.ndim)] + + # Calculate the block sizes for the output, steps, first and second inputs. + # It is simpler to calculate them all together than doing them in separate + # loops due to all the special cases that need to be handled. + optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else + _calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim)) + block_size, overlaps, \ + in1_step, in2_step = zip(*optimal_sizes) + + # Fall back to fftconvolve if there is only one block in every dimension. + if in1_step == s1 and in2_step == s2: + return fftconvolve(in1, in2, mode=mode, axes=axes) + + # Figure out the number of steps and padding. + # This would get too complicated in a list comprehension. + nsteps1 = [] + nsteps2 = [] + pad_size1 = [] + pad_size2 = [] + for i in range(in1.ndim): + if i not in axes: + pad_size1 += [(0, 0)] + pad_size2 += [(0, 0)] + continue + + if s1[i] > in1_step[i]: + curnstep1 = math.ceil((s1[i]+1)/in1_step[i]) + if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]: + curnstep1 += 1 + + curpad1 = curnstep1*in1_step[i] - s1[i] + else: + curnstep1 = 1 + curpad1 = 0 + + if s2[i] > in2_step[i]: + curnstep2 = math.ceil((s2[i]+1)/in2_step[i]) + if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]: + curnstep2 += 1 + + curpad2 = curnstep2*in2_step[i] - s2[i] + else: + curnstep2 = 1 + curpad2 = 0 + + nsteps1 += [curnstep1] + nsteps2 += [curnstep2] + pad_size1 += [(0, curpad1)] + pad_size2 += [(0, curpad2)] + + # Pad the array to a size that can be reshaped to the desired shape + # if necessary. + if not all(curpad == (0, 0) for curpad in pad_size1): + in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0) + + if not all(curpad == (0, 0) for curpad in pad_size2): + in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0) + + # Reshape the overlap-add parts to input block sizes. 
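+    # `split_axes` will index the inserted "number of blocks" dimensions and
+    # `fft_axes` the per-block data dimensions of the reshaped inputs.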
+ split_axes = [iax+i for i, iax in enumerate(axes)] + fft_axes = [iax+1 for iax in split_axes] + + # We need to put each new dimension before the corresponding dimension + # being reshaped in order to get the data in the right layout at the end. + reshape_size1 = list(in1_step) + reshape_size2 = list(in2_step) + for i, iax in enumerate(split_axes): + reshape_size1.insert(iax, nsteps1[i]) + reshape_size2.insert(iax, nsteps2[i]) + + in1 = in1.reshape(*reshape_size1) + in2 = in2.reshape(*reshape_size2) + + # Do the convolution. + fft_shape = [block_size[i] for i in axes] + ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False) + + # Do the overlap-add. + for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes): + overlap = overlaps[ax] + if overlap is None: + continue + + ret, overpart = np.split(ret, [-overlap], ax_fft) + overpart = np.split(overpart, [-1], ax_split)[0] + + ret_overpart = np.split(ret, [overlap], ax_fft)[0] + ret_overpart = np.split(ret_overpart, [1], ax_split)[1] + ret_overpart += overpart + + # Reshape back to the correct dimensionality. + shape_ret = [ret.shape[i] if i not in fft_axes else + ret.shape[i]*ret.shape[i-1] + for i in range(ret.ndim) if i not in split_axes] + ret = ret.reshape(*shape_ret) + + # Slice to the correct size. + slice_final = tuple([slice(islice) for islice in shape_final]) + ret = ret[slice_final] + + return _apply_conv_mode(ret, s1, s2, mode, axes) + + +def _numeric_arrays(arrays, kinds='buifc'): + """ + See if a list of arrays are all numeric. + + Parameters + ---------- + arrays : array or list of arrays + arrays to check if numeric. + kinds : string-like + The dtypes of the arrays to be checked. If the dtype.kind of + the ndarrays are not in this string the function returns False and + otherwise returns True. + """ + if isinstance(arrays, np.ndarray): + return arrays.dtype.kind in kinds + for array_ in arrays: + if array_.dtype.kind not in kinds: + return False + return True + + +def _conv_ops(x_shape, h_shape, mode): + """ + Find the number of operations required for direct/fft methods of + convolution. The direct operations were recorded by making a dummy class to + record the number of operations by overriding ``__mul__`` and ``__add__``. + The FFT operations rely on the (well-known) computational complexity of the + FFT (and the implementation of ``_freq_domain_conv``). 
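+
+    In rough terms, ``direct_ops`` counts the scalar operations of the direct
+    sum for the requested `mode`, while ``fft_ops`` is modelled as
+    ``3 * N * log(N)`` with ``N`` the number of points of the full output
+    (three FFTs of that size are required).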
+ + """ + if mode == "full": + out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] + elif mode == "valid": + out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)] + elif mode == "same": + out_shape = x_shape + else: + raise ValueError("Acceptable mode flags are 'valid'," + f" 'same', or 'full', not mode={mode}") + + s1, s2 = x_shape, h_shape + if len(x_shape) == 1: + s1, s2 = s1[0], s2[0] + if mode == "full": + direct_ops = s1 * s2 + elif mode == "valid": + direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2 + elif mode == "same": + direct_ops = (s1 * s2 if s1 < s2 else + s1 * s2 - (s2 // 2) * ((s2 + 1) // 2)) + else: + if mode == "full": + direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) + elif mode == "valid": + direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) + elif mode == "same": + direct_ops = _prod(s1) * _prod(s2) + + full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] + N = _prod(full_out_shape) + fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape + return fft_ops, direct_ops + + +def _fftconv_faster(x, h, mode): + """ + See if using fftconvolve or convolve is faster. + + Parameters + ---------- + x : np.ndarray + Signal + h : np.ndarray + Kernel + mode : str + Mode passed to convolve + + Returns + ------- + fft_faster : bool + + Notes + ----- + See docstring of `choose_conv_method` for details on tuning hardware. + + See pull request 11031 for more detail: + https://github.com/scipy/scipy/pull/11031. + + """ + fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode) + offset = -1e-3 if x.ndim == 1 else -1e-4 + constants = { + "valid": (1.89095737e-9, 2.1364985e-10, offset), + "full": (1.7649070e-9, 2.1414831e-10, offset), + "same": (3.2646654e-9, 2.8478277e-10, offset) + if h.size <= x.size + else (3.21635404e-9, 1.1773253e-8, -1e-5), + } if x.ndim == 1 else { + "valid": (1.85927e-9, 2.11242e-8, offset), + "full": (1.99817e-9, 1.66174e-8, offset), + "same": (2.04735e-9, 1.55367e-8, offset), + } + O_fft, O_direct, O_offset = constants[mode] + return O_fft * fft_ops < O_direct * direct_ops + O_offset + + +def _reverse_and_conj(x): + """ + Reverse array `x` in all dimensions and perform the complex conjugate + """ + reverse = (slice(None, None, -1),) * x.ndim + return x[reverse].conj() + + +def _np_conv_ok(volume, kernel, mode): + """ + See if numpy supports convolution of `volume` and `kernel` (i.e. both are + 1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the + size of the larger input, while SciPy's uses the size of the first input. + + Invalid mode strings will return False and be caught by the calling func. + """ + if volume.ndim == kernel.ndim == 1: + if mode in ('full', 'valid'): + return True + elif mode == 'same': + return volume.size >= kernel.size + else: + return False + + +def _timeit_fast(stmt="pass", setup="pass", repeat=3): + """ + Returns the time the statement/function took, in seconds. + + Faster, less precise version of IPython's timeit. `stmt` can be a statement + written as a string or a callable. + + Will do only 1 loop (like IPython's timeit) with no repetitions + (unlike IPython) for very slow functions. For fast functions, only does + enough loops to take 5 ms, which seems to produce similar results (on + Windows at least), and avoids doing an extraneous cycle that isn't + measured. 
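As a back-of-envelope illustration of the cost model in ``_conv_ops`` and ``_fftconv_faster`` above, the two operation-count estimates for a 1-D ``'full'`` convolution can be recomputed by hand. The lengths below are arbitrary, and the constants are the machine-tuned 1-D values shown above, so the crossover point will differ on other hardware::

    import numpy as np

    n, k = 100_000, 512                  # arbitrary signal and filter lengths
    direct_ops = n * k                   # 1-D 'full' estimate from _conv_ops
    N = n + k - 1                        # length of the full output
    fft_ops = 3 * N * np.log(N)          # three FFTs of the full output size

    # machine-tuned 1-D 'full' constants from _fftconv_faster
    O_fft, O_direct, O_offset = 1.7649070e-9, 2.1414831e-10, -1e-3
    prefer_fft = O_fft * fft_ops < O_direct * direct_ops + O_offset
    print(f"direct={direct_ops:.2e}  fft={fft_ops:.2e}  prefer_fft={prefer_fft}")

For inputs of this order the weighted FFT estimate typically comes out cheaper, which is consistent with `choose_conv_method` usually selecting ``'fft'`` for long 1-D signals.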
+ + """ + timer = timeit.Timer(stmt, setup) + + # determine number of calls per rep so total time for 1 rep >= 5 ms + x = 0 + for p in range(0, 10): + number = 10**p + x = timer.timeit(number) # seconds + if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one + break + if x > 1: # second + # If it's macroscopic, don't bother with repetitions + best = x + else: + number *= 10 + r = timer.repeat(repeat, number) + best = min(r) + + sec = best / number + return sec + + +def choose_conv_method(in1, in2, mode='full', measure=False): + """ + Find the fastest convolution/correlation method. + + This primarily exists to be called during the ``method='auto'`` option in + `convolve` and `correlate`. It can also be used to determine the value of + ``method`` for many different convolutions of the same dtype/shape. + In addition, it supports timing the convolution to adapt the value of + ``method`` to a particular set of inputs and/or hardware. + + Parameters + ---------- + in1 : array_like + The first argument passed into the convolution function. + in2 : array_like + The second argument passed into the convolution function. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + measure : bool, optional + If True, run and time the convolution of `in1` and `in2` with both + methods and return the fastest. If False (default), predict the fastest + method using precomputed values. + + Returns + ------- + method : str + A string indicating which convolution method is fastest, either + 'direct' or 'fft' + times : dict, optional + A dictionary containing the times (in seconds) needed for each method. + This value is only returned if ``measure=True``. + + See Also + -------- + convolve + correlate + + Notes + ----- + Generally, this method is 99% accurate for 2D signals and 85% accurate + for 1D signals for randomly chosen input sizes. For precision, use + ``measure=True`` to find the fastest method by timing the convolution. + This can be used to avoid the minimal overhead of finding the fastest + ``method`` later, or to adapt the value of ``method`` to a particular set + of inputs. + + Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this + function. These experiments measured the ratio between the time required + when using ``method='auto'`` and the time required for the fastest method + (i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these + experiments, we found: + + * There is a 95% chance of this ratio being less than 1.5 for 1D signals + and a 99% chance of being less than 2.5 for 2D signals. + * The ratio was always less than 2.5/5 for 1D/2D signals respectively. + * This function is most inaccurate for 1D convolutions that take between 1 + and 10 milliseconds with ``method='direct'``. A good proxy for this + (at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``. + + The 2D results almost certainly generalize to 3D/4D/etc because the + implementation is the same (the 1D implementation is different). + + All the numbers above are specific to the EC2 machine. However, we did find + that this function generalizes fairly decently across hardware. 
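When the prediction matters, the ``measure=True`` path documented above times both methods on the actual inputs instead of relying on the precomputed constants; a minimal usage sketch (the sizes are arbitrary)::

    import numpy as np
    from scipy import signal

    rng = np.random.default_rng()
    a = rng.standard_normal(10_000)
    b = rng.standard_normal(1_000)

    method, times = signal.choose_conv_method(a, b, mode='full', measure=True)
    out = signal.convolve(a, b, mode='full', method=method)
    print(method, times)  # e.g. 'fft' and a dict of per-method timings in seconds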
The speed + tests were of similar quality (and even slightly better) than the same + tests performed on the machine to tune this function's numbers (a mid-2014 + 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor). + + There are cases when `fftconvolve` supports the inputs but this function + returns `direct` (e.g., to protect against floating point integer + precision). + + .. versionadded:: 0.19 + + Examples + -------- + Estimate the fastest method for a given input: + + >>> import numpy as np + >>> from scipy import signal + >>> rng = np.random.default_rng() + >>> img = rng.random((32, 32)) + >>> filter = rng.random((8, 8)) + >>> method = signal.choose_conv_method(img, filter, mode='same') + >>> method + 'fft' + + This can then be applied to other arrays of the same dtype and shape: + + >>> img2 = rng.random((32, 32)) + >>> filter2 = rng.random((8, 8)) + >>> corr2 = signal.correlate(img2, filter2, mode='same', method=method) + >>> conv2 = signal.convolve(img2, filter2, mode='same', method=method) + + The output of this function (``method``) works with `correlate` and + `convolve`. + + """ + volume = np.asarray(in1) + kernel = np.asarray(in2) + + _reject_objects(volume, 'choose_conv_method') + _reject_objects(kernel, 'choose_conv_method') + + if measure: + times = {} + for method in ['fft', 'direct']: + times[method] = _timeit_fast(lambda: convolve(volume, kernel, + mode=mode, method=method)) + + chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct' + return chosen_method, times + + # for integer input, + # catch when more precision required than float provides (representing an + # integer as float can lose precision in fftconvolve if larger than 2**52) + if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]): + max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max()) + max_value *= int(min(volume.size, kernel.size)) + if max_value > 2**np.finfo('float').nmant - 1: + return 'direct' + + if _numeric_arrays([volume, kernel], kinds='b'): + return 'direct' + + if _numeric_arrays([volume, kernel]): + if _fftconv_faster(volume, kernel, mode): + return 'fft' + + return 'direct' + + +def convolve(in1, in2, mode='full', method='auto'): + """ + Convolve two N-dimensional arrays. + + Convolve `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + method : str {'auto', 'direct', 'fft'}, optional + A string indicating which method to use to calculate the convolution. + + ``direct`` + The convolution is determined directly from sums, the definition of + convolution. + ``fft`` + The Fourier Transform is used to perform the convolution by calling + `fftconvolve`. + ``auto`` + Automatically chooses direct or Fourier method based on an estimate + of which is faster (default). See Notes for more detail. + + .. 
versionadded:: 0.19.0 + + Returns + ------- + convolve : array + An N-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + Warns + ----- + RuntimeWarning + Use of the FFT convolution on input containing NAN or INF will lead + to the entire output being NAN or INF. Use method='direct' when your + input contains NAN or INF values. + + See Also + -------- + numpy.polymul : performs polynomial multiplication (same operation, but + also accepts poly1d objects) + choose_conv_method : chooses the fastest appropriate convolution method + fftconvolve : Always uses the FFT method. + oaconvolve : Uses the overlap-add method to do convolution, which is + generally faster when the input arrays are large and + significantly different in size. + + Notes + ----- + By default, `convolve` and `correlate` use ``method='auto'``, which calls + `choose_conv_method` to choose the fastest method using pre-computed + values (`choose_conv_method` can also measure real-world timing with a + keyword argument). Because `fftconvolve` relies on floating point numbers, + there are certain constraints that may force ``method='direct'`` (more detail + in `choose_conv_method` docstring). + + Examples + -------- + Smooth a square pulse using a Hann window: + + >>> import numpy as np + >>> from scipy import signal + >>> sig = np.repeat([0., 1., 0.], 100) + >>> win = signal.windows.hann(50) + >>> filtered = signal.convolve(sig, win, mode='same') / sum(win) + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('Original pulse') + >>> ax_orig.margins(0, 0.1) + >>> ax_win.plot(win) + >>> ax_win.set_title('Filter impulse response') + >>> ax_win.margins(0, 0.1) + >>> ax_filt.plot(filtered) + >>> ax_filt.set_title('Filtered signal') + >>> ax_filt.margins(0, 0.1) + >>> fig.tight_layout() + >>> fig.show() + + """ + volume = np.asarray(in1) + kernel = np.asarray(in2) + + _reject_objects(volume, 'correlate') + _reject_objects(kernel, 'correlate') + + if volume.ndim == kernel.ndim == 0: + return volume * kernel + elif volume.ndim != kernel.ndim: + raise ValueError("volume and kernel should have the same " + "dimensionality") + + if _inputs_swap_needed(mode, volume.shape, kernel.shape): + # Convolution is commutative; order doesn't have any effect on output + volume, kernel = kernel, volume + + if method == 'auto': + method = choose_conv_method(volume, kernel, mode=mode) + + if method == 'fft': + out = fftconvolve(volume, kernel, mode=mode) + result_type = np.result_type(volume, kernel) + if result_type.kind in {'u', 'i'}: + out = np.around(out) + + if np.isnan(out.flat[0]) or np.isinf(out.flat[0]): + warnings.warn("Use of fft convolution on input with NAN or inf" + " results in NAN or inf output. Consider using" + " method='direct' instead.", + category=RuntimeWarning, stacklevel=2) + + return out.astype(result_type) + elif method == 'direct': + # fastpath to faster numpy.convolve for 1d inputs when possible + if _np_conv_ok(volume, kernel, mode): + return np.convolve(volume, kernel, mode) + + return correlate(volume, _reverse_and_conj(kernel), mode, 'direct') + else: + raise ValueError("Acceptable method flags are 'auto'," + " 'direct', or 'fft'.") + + +def order_filter(a, domain, rank): + """ + Perform an order filter on an N-D array. + + Perform an order filter on the array in. The domain argument acts as a + mask centered over each pixel. 
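The ``'direct'`` branch above reduces convolution to a correlation with a reversed, conjugated kernel (via ``_reverse_and_conj``); a small sketch verifying that identity numerically::

    import numpy as np
    from scipy import signal

    rng = np.random.default_rng()
    x = rng.standard_normal(50) + 1j * rng.standard_normal(50)
    h = rng.standard_normal(8) + 1j * rng.standard_normal(8)

    conv = signal.convolve(x, h, mode='full', method='direct')
    corr = signal.correlate(x, h[::-1].conj(), mode='full', method='direct')
    assert np.allclose(conv, corr)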
The non-zero elements of domain are + used to select elements surrounding each input pixel which are placed + in a list. The list is sorted, and the output for that pixel is the + element corresponding to rank in the sorted list. + + Parameters + ---------- + a : ndarray + The N-dimensional input array. + domain : array_like + A mask array with the same number of dimensions as `a`. + Each dimension should have an odd number of elements. + rank : int + A non-negative integer which selects the element from the + sorted list (0 corresponds to the smallest element, 1 is the + next smallest element, etc.). + + Returns + ------- + out : ndarray + The results of the order filter in an array with the same + shape as `a`. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> x = np.arange(25).reshape(5, 5) + >>> domain = np.identity(3) + >>> x + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19], + [20, 21, 22, 23, 24]]) + >>> signal.order_filter(x, domain, 0) + array([[ 0, 0, 0, 0, 0], + [ 0, 0, 1, 2, 0], + [ 0, 5, 6, 7, 0], + [ 0, 10, 11, 12, 0], + [ 0, 0, 0, 0, 0]]) + >>> signal.order_filter(x, domain, 2) + array([[ 6, 7, 8, 9, 4], + [ 11, 12, 13, 14, 9], + [ 16, 17, 18, 19, 14], + [ 21, 22, 23, 24, 19], + [ 20, 21, 22, 23, 24]]) + + """ + domain = np.asarray(domain) + for dimsize in domain.shape: + if (dimsize % 2) != 1: + raise ValueError("Each dimension of domain argument " + "should have an odd number of elements.") + + a = np.asarray(a) + if not (np.issubdtype(a.dtype, np.integer) + or a.dtype in [np.float32, np.float64]): + raise ValueError(f"dtype={a.dtype} is not supported by order_filter") + + result = ndimage.rank_filter(a, rank, footprint=domain, mode='constant') + return result + + +def medfilt(volume, kernel_size=None): + """ + Perform a median filter on an N-dimensional array. + + Apply a median filter to the input array using a local window-size + given by `kernel_size`. The array will automatically be zero-padded. + + Parameters + ---------- + volume : array_like + An N-dimensional input array. + kernel_size : array_like, optional + A scalar or an N-length list giving the size of the median filter + window in each dimension. Elements of `kernel_size` should be odd. + If `kernel_size` is a scalar, then this scalar is used as the size in + each dimension. Default size is 3 for each dimension. + + Returns + ------- + out : ndarray + An array the same size as input containing the median filtered + result. + + Warns + ----- + UserWarning + If array size is smaller than kernel size along any dimension + + See Also + -------- + scipy.ndimage.median_filter + scipy.signal.medfilt2d + + Notes + ----- + The more general function `scipy.ndimage.median_filter` has a more + efficient implementation of a median filter and therefore runs much faster. + + For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes, + the specialised function `scipy.signal.medfilt2d` may be faster. 
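Because the implementation below delegates to `scipy.ndimage.rank_filter` with zero padding, the output should agree with `scipy.ndimage.median_filter` when the same constant boundary mode is requested; a quick consistency sketch::

    import numpy as np
    from scipy import ndimage, signal

    x = np.random.default_rng().standard_normal((64, 64))

    a = signal.medfilt(x, kernel_size=5)
    b = ndimage.median_filter(x, size=5, mode='constant', cval=0.0)
    assert np.allclose(a, b)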
+ + """ + volume = np.atleast_1d(volume) + if not (np.issubdtype(volume.dtype, np.integer) + or volume.dtype in [np.float32, np.float64]): + raise ValueError(f"dtype={volume.dtype} is not supported by medfilt") + + if kernel_size is None: + kernel_size = [3] * volume.ndim + kernel_size = np.asarray(kernel_size) + if kernel_size.shape == (): + kernel_size = np.repeat(kernel_size.item(), volume.ndim) + + for k in range(volume.ndim): + if (kernel_size[k] % 2) != 1: + raise ValueError("Each element of kernel_size should be odd.") + if any(k > s for k, s in zip(kernel_size, volume.shape)): + warnings.warn('kernel_size exceeds volume extent: the volume will be ' + 'zero-padded.', + stacklevel=2) + + size = math.prod(kernel_size) + result = ndimage.rank_filter(volume, size // 2, size=kernel_size, + mode='constant') + + return result + + +def wiener(im, mysize=None, noise=None): + """ + Perform a Wiener filter on an N-dimensional array. + + Apply a Wiener filter to the N-dimensional array `im`. + + Parameters + ---------- + im : ndarray + An N-dimensional array. + mysize : int or array_like, optional + A scalar or an N-length list giving the size of the Wiener filter + window in each dimension. Elements of mysize should be odd. + If mysize is a scalar, then this scalar is used as the size + in each dimension. + noise : float, optional + The noise-power to use. If None, then noise is estimated as the + average of the local variance of the input. + + Returns + ------- + out : ndarray + Wiener filtered result with the same shape as `im`. + + Notes + ----- + This implementation is similar to wiener2 in Matlab/Octave. + For more details see [1]_ + + References + ---------- + .. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing, + Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548. + + Examples + -------- + >>> from scipy.datasets import face + >>> from scipy.signal import wiener + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> img = rng.random((40, 40)) #Create a random image + >>> filtered_img = wiener(img, (5, 5)) #Filter the image + >>> f, (plot1, plot2) = plt.subplots(1, 2) + >>> plot1.imshow(img) + >>> plot2.imshow(filtered_img) + >>> plt.show() + + """ + im = np.asarray(im) + if mysize is None: + mysize = [3] * im.ndim + mysize = np.asarray(mysize) + if mysize.shape == (): + mysize = np.repeat(mysize.item(), im.ndim) + + # Estimate the local mean + size = math.prod(mysize) + lMean = correlate(im, np.ones(mysize), 'same') / size + + # Estimate the local variance + lVar = (correlate(im ** 2, np.ones(mysize), 'same') / size - lMean ** 2) + + # Estimate the noise power if needed. + if noise is None: + noise = np.mean(np.ravel(lVar), axis=0) + + res = (im - lMean) + res *= (1 - noise / lVar) + res += lMean + out = np.where(lVar < noise, lMean, res) + + return out + + +def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0): + """ + Convolve two 2-dimensional arrays. + + Convolve `in1` and `in2` with output size determined by `mode`, and + boundary conditions determined by `boundary` and `fillvalue`. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. 
In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + boundary : str {'fill', 'wrap', 'symm'}, optional + A flag indicating how to handle boundaries: + + ``fill`` + pad input arrays with fillvalue. (default) + ``wrap`` + circular boundary conditions. + ``symm`` + symmetrical boundary conditions. + + fillvalue : scalar, optional + Value to fill pad input arrays with. Default is 0. + + Returns + ------- + out : ndarray + A 2-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + Examples + -------- + Compute the gradient of an image by 2D convolution with a complex Scharr + operator. (Horizontal operator is real, vertical is imaginary.) Use + symmetric boundary condition to avoid creating edges at the image + boundaries. + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy import datasets + >>> ascent = datasets.ascent() + >>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j], + ... [-10+0j, 0+ 0j, +10 +0j], + ... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy + >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same') + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15)) + >>> ax_orig.imshow(ascent, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_mag.imshow(np.absolute(grad), cmap='gray') + >>> ax_mag.set_title('Gradient magnitude') + >>> ax_mag.set_axis_off() + >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles + >>> ax_ang.set_title('Gradient orientation') + >>> ax_ang.set_axis_off() + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if not in1.ndim == in2.ndim == 2: + raise ValueError('convolve2d inputs must both be 2-D arrays') + + if _inputs_swap_needed(mode, in1.shape, in2.shape): + in1, in2 = in2, in1 + + val = _valfrommode(mode) + bval = _bvalfromboundary(boundary) + out = _sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue) + return out + + +def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0): + """ + Cross-correlate two 2-dimensional arrays. + + Cross correlate `in1` and `in2` with output size determined by `mode`, and + boundary conditions determined by `boundary` and `fillvalue`. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear cross-correlation + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + boundary : str {'fill', 'wrap', 'symm'}, optional + A flag indicating how to handle boundaries: + + ``fill`` + pad input arrays with fillvalue. (default) + ``wrap`` + circular boundary conditions. + ``symm`` + symmetrical boundary conditions. + + fillvalue : scalar, optional + Value to fill pad input arrays with. Default is 0. + + Returns + ------- + correlate2d : ndarray + A 2-dimensional array containing a subset of the discrete linear + cross-correlation of `in1` with `in2`. 
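As with the 1-D routines, cross-correlation and convolution differ only by a flip of the kernel (plus a conjugation for complex data); a small sketch checking the standard identity for real inputs::

    import numpy as np
    from scipy import signal

    rng = np.random.default_rng()
    img = rng.standard_normal((20, 20))
    ker = rng.standard_normal((5, 5))

    corr = signal.correlate2d(img, ker, mode='full')
    conv = signal.convolve2d(img, ker[::-1, ::-1], mode='full')
    assert np.allclose(corr, conv)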
+ + Notes + ----- + When using "same" mode with even-length inputs, the outputs of `correlate` + and `correlate2d` differ: There is a 1-index offset between them. + + Examples + -------- + Use 2D cross-correlation to find the location of a template in a noisy + image: + + >>> import numpy as np + >>> from scipy import signal, datasets, ndimage + >>> rng = np.random.default_rng() + >>> face = datasets.face(gray=True) - datasets.face(gray=True).mean() + >>> face = ndimage.zoom(face[30:500, 400:950], 0.5) # extract the face + >>> template = np.copy(face[135:165, 140:175]) # right eye + >>> template -= template.mean() + >>> face = face + rng.standard_normal(face.shape) * 50 # add noise + >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same') + >>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1, + ... figsize=(6, 15)) + >>> ax_orig.imshow(face, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_template.imshow(template, cmap='gray') + >>> ax_template.set_title('Template') + >>> ax_template.set_axis_off() + >>> ax_corr.imshow(corr, cmap='gray') + >>> ax_corr.set_title('Cross-correlation') + >>> ax_corr.set_axis_off() + >>> ax_orig.plot(x, y, 'ro') + >>> fig.show() + + """ + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if not in1.ndim == in2.ndim == 2: + raise ValueError('correlate2d inputs must both be 2-D arrays') + + swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape) + if swapped_inputs: + in1, in2 = in2, in1 + + val = _valfrommode(mode) + bval = _bvalfromboundary(boundary) + out = _sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue) + + if swapped_inputs: + out = out[::-1, ::-1] + + return out + + +def medfilt2d(input, kernel_size=3): + """ + Median filter a 2-dimensional array. + + Apply a median filter to the `input` array using a local window-size + given by `kernel_size` (must be odd). The array is zero-padded + automatically. + + Parameters + ---------- + input : array_like + A 2-dimensional input array. + kernel_size : array_like, optional + A scalar or a list of length 2, giving the size of the + median filter window in each dimension. Elements of + `kernel_size` should be odd. If `kernel_size` is a scalar, + then this scalar is used as the size in each dimension. + Default is a kernel of size (3, 3). + + Returns + ------- + out : ndarray + An array the same size as input containing the median filtered + result. + + See Also + -------- + scipy.ndimage.median_filter + + Notes + ----- + This is faster than `medfilt` when the input dtype is ``uint8``, + ``float32``, or ``float64``; for other types, this falls back to + `medfilt`. In some situations, `scipy.ndimage.median_filter` may be + faster than this function. 
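The relative speed claims above are easy to check on a particular machine; a rough timing sketch (array size, kernel size, and the resulting ranking are all hardware- and input-dependent)::

    import timeit
    import numpy as np
    from scipy import ndimage, signal

    img = np.random.default_rng().standard_normal((512, 512)).astype(np.float32)

    for label, fn in [
        ("medfilt2d", lambda: signal.medfilt2d(img, 5)),
        ("medfilt", lambda: signal.medfilt(img, 5)),
        ("ndimage.median_filter",
         lambda: ndimage.median_filter(img, 5, mode='constant')),
    ]:
        print(label, timeit.timeit(fn, number=3))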
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> x = np.arange(25).reshape(5, 5) + >>> x + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19], + [20, 21, 22, 23, 24]]) + + # Replaces i,j with the median out of 5*5 window + + >>> signal.medfilt2d(x, kernel_size=5) + array([[ 0, 0, 2, 0, 0], + [ 0, 3, 7, 4, 0], + [ 2, 8, 12, 9, 4], + [ 0, 8, 12, 9, 0], + [ 0, 0, 12, 0, 0]]) + + # Replaces i,j with the median out of default 3*3 window + + >>> signal.medfilt2d(x) + array([[ 0, 1, 2, 3, 0], + [ 1, 6, 7, 8, 4], + [ 6, 11, 12, 13, 9], + [11, 16, 17, 18, 14], + [ 0, 16, 17, 18, 0]]) + + # Replaces i,j with the median out of default 5*3 window + + >>> signal.medfilt2d(x, kernel_size=[5,3]) + array([[ 0, 1, 2, 3, 0], + [ 0, 6, 7, 8, 3], + [ 5, 11, 12, 13, 8], + [ 5, 11, 12, 13, 8], + [ 0, 11, 12, 13, 0]]) + + # Replaces i,j with the median out of default 3*5 window + + >>> signal.medfilt2d(x, kernel_size=[3,5]) + array([[ 0, 0, 2, 1, 0], + [ 1, 5, 7, 6, 3], + [ 6, 10, 12, 11, 8], + [11, 15, 17, 16, 13], + [ 0, 15, 17, 16, 0]]) + + # As seen in the examples, + # kernel numbers must be odd and not exceed original array dim + + """ + image = np.asarray(input) + + # checking dtype.type, rather than just dtype, is necessary for + # excluding np.longdouble with MS Visual C. + if image.dtype.type not in (np.ubyte, np.float32, np.float64): + return medfilt(image, kernel_size) + + if kernel_size is None: + kernel_size = [3] * 2 + kernel_size = np.asarray(kernel_size) + if kernel_size.shape == (): + kernel_size = np.repeat(kernel_size.item(), 2) + + for size in kernel_size: + if (size % 2) != 1: + raise ValueError("Each element of kernel_size should be odd.") + + return _sigtools._medfilt2d(image, kernel_size) + + +def lfilter(b, a, x, axis=-1, zi=None): + """ + Filter data along one-dimension with an IIR or FIR filter. + + Filter a data sequence, `x`, using a digital filter. This works for many + fundamental data types (including Object type). The filter is a direct + form II transposed implementation of the standard difference equation + (see Notes). + + The function `sosfilt` (and filter design using ``output='sos'``) should be + preferred over `lfilter` for most filtering tasks, as second-order sections + have fewer numerical problems. + + Parameters + ---------- + b : array_like + The numerator coefficient vector in a 1-D sequence. + a : array_like + The denominator coefficient vector in a 1-D sequence. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the filter delays. It is a vector + (or array of vectors for an N-dimensional input) of length + ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then + initial rest is assumed. See `lfiltic` for more information. + + Returns + ------- + y : array + The output of the digital filter. + zf : array, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + lfiltic : Construct initial conditions for `lfilter`. + lfilter_zi : Compute initial state (steady state of step response) for + `lfilter`. + filtfilt : A forward-backward filter, to obtain a filter with zero phase. 
+ savgol_filter : A Savitzky-Golay filter. + sosfilt: Filter data using cascaded second-order sections. + sosfiltfilt: A forward-backward filter using second-order sections. + + Notes + ----- + The filter function is implemented as a direct II transposed structure. + This means that the filter implements:: + + a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M] + - a[1]*y[n-1] - ... - a[N]*y[n-N] + + where `M` is the degree of the numerator, `N` is the degree of the + denominator, and `n` is the sample number. It is implemented using + the following difference equations (assuming M = N):: + + a[0]*y[n] = b[0] * x[n] + d[0][n-1] + d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1] + d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1] + ... + d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1] + d[N-1][n] = b[N] * x[n] - a[N] * y[n] + + where `d` are the state variables. + + The rational transfer function describing this filter in the + z-transform domain is:: + + -1 -M + b[0] + b[1]z + ... + b[M] z + Y(z) = -------------------------------- X(z) + -1 -N + a[0] + a[1]z + ... + a[N] z + + Examples + -------- + Generate a noisy signal to be filtered: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> t = np.linspace(-1, 1, 201) + >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) + + ... 0.1*np.sin(2*np.pi*1.25*t + 1) + + ... 0.18*np.cos(2*np.pi*3.85*t)) + >>> xn = x + rng.standard_normal(len(t)) * 0.08 + + Create an order 3 lowpass butterworth filter: + + >>> b, a = signal.butter(3, 0.05) + + Apply the filter to xn. Use lfilter_zi to choose the initial condition of + the filter: + + >>> zi = signal.lfilter_zi(b, a) + >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0]) + + Apply the filter again, to have a result filtered at an order the same as + filtfilt: + + >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0]) + + Use filtfilt to apply the filter: + + >>> y = signal.filtfilt(b, a, xn) + + Plot the original signal and the various filtered versions: + + >>> plt.figure + >>> plt.plot(t, xn, 'b', alpha=0.75) + >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k') + >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice', + ... 'filtfilt'), loc='best') + >>> plt.grid(True) + >>> plt.show() + + """ + b = np.atleast_1d(b) + a = np.atleast_1d(a) + + _reject_objects(x, 'lfilter') + _reject_objects(a, 'lfilter') + _reject_objects(b, 'lfilter') + + if len(a) == 1: + # This path only supports types fdgFDGO to mirror _linear_filter below. + # Any of b, a, x, or zi can set the dtype, but there is no default + # casting of other types; instead a NotImplementedError is raised. + b = np.asarray(b) + a = np.asarray(a) + if b.ndim != 1 and a.ndim != 1: + raise ValueError('object of too small depth for desired array') + x = _validate_x(x) + inputs = [b, a, x] + if zi is not None: + # _linear_filter does not broadcast zi, but does do expansion of + # singleton dims. 
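+            # The stride trick below mimics that behaviour for this FIR-only
+            # path: axes of `zi` that already match `expected_shape` keep their
+            # own strides, while singleton dimensions on non-filter axes get a
+            # zero stride, so `zi` broadcasts along `x` without copying data.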
+ zi = np.asarray(zi) + if zi.ndim != x.ndim: + raise ValueError('object of too small depth for desired array') + expected_shape = list(x.shape) + expected_shape[axis] = b.shape[0] - 1 + expected_shape = tuple(expected_shape) + # check the trivial case where zi is the right shape first + if zi.shape != expected_shape: + strides = zi.ndim * [None] + if axis < 0: + axis += zi.ndim + for k in range(zi.ndim): + if k == axis and zi.shape[k] == expected_shape[k]: + strides[k] = zi.strides[k] + elif k != axis and zi.shape[k] == expected_shape[k]: + strides[k] = zi.strides[k] + elif k != axis and zi.shape[k] == 1: + strides[k] = 0 + else: + raise ValueError('Unexpected shape for zi: expected ' + f'{expected_shape}, found {zi.shape}.') + zi = np.lib.stride_tricks.as_strided(zi, expected_shape, + strides) + inputs.append(zi) + dtype = np.result_type(*inputs) + + if dtype.char not in 'fdgFDGO': + raise NotImplementedError(f"input type '{dtype}' not supported") + + b = np.array(b, dtype=dtype) + a = np.asarray(a, dtype=dtype) + b /= a[0] + x = np.asarray(x, dtype=dtype) + + out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x) + ind = out_full.ndim * [slice(None)] + if zi is not None: + ind[axis] = slice(zi.shape[axis]) + out_full[tuple(ind)] += zi + + ind[axis] = slice(out_full.shape[axis] - len(b) + 1) + out = out_full[tuple(ind)] + + if zi is None: + return out + else: + ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None) + zf = out_full[tuple(ind)] + return out, zf + else: + if zi is None: + return _sigtools._linear_filter(b, a, x, axis) + else: + return _sigtools._linear_filter(b, a, x, axis, zi) + + +def lfiltic(b, a, y, x=None): + """ + Construct initial conditions for lfilter given input and output vectors. + + Given a linear filter (b, a) and initial conditions on the output `y` + and the input `x`, return the initial conditions on the state vector zi + which is used by `lfilter` to generate the output given the input. + + Parameters + ---------- + b : array_like + Linear filter term. + a : array_like + Linear filter term. + y : array_like + Initial conditions. + + If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``. + + If `y` is too short, it is padded with zeros. + x : array_like, optional + Initial conditions. + + If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``. + + If `x` is not given, its initial conditions are assumed zero. + + If `x` is too short, it is padded with zeros. + + Returns + ------- + zi : ndarray + The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, + where ``K = max(M, N)``. + + See Also + -------- + lfilter, lfilter_zi + + """ + N = np.size(a) - 1 + M = np.size(b) - 1 + K = max(M, N) + y = np.asarray(y) + + if x is None: + result_type = np.result_type(np.asarray(b), np.asarray(a), y) + if result_type.kind in 'bui': + result_type = np.float64 + x = np.zeros(M, dtype=result_type) + else: + x = np.asarray(x) + + result_type = np.result_type(np.asarray(b), np.asarray(a), y, x) + if result_type.kind in 'bui': + result_type = np.float64 + x = x.astype(result_type) + + L = np.size(x) + if L < M: + x = np.r_[x, np.zeros(M - L)] + + y = y.astype(result_type) + zi = np.zeros(K, result_type) + + L = np.size(y) + if L < N: + y = np.r_[y, np.zeros(N - L)] + + for m in range(M): + zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0) + + for m in range(N): + zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0) + + return zi + + +def deconvolve(signal, divisor): + """Deconvolves ``divisor`` out of ``signal`` using inverse filtering. 
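The state vector built by `lfiltic` (or a vector of zeros for initial rest) lets `lfilter` process a long signal in chunks without glitches at the chunk boundaries; a minimal sketch of that pattern (the Butterworth design is just a convenient way to obtain coefficients)::

    import numpy as np
    from scipy import signal

    rng = np.random.default_rng()
    x = rng.standard_normal(1000)
    b, a = signal.butter(4, 0.2)

    y_ref = signal.lfilter(b, a, x)            # one-shot filtering from rest

    zi = np.zeros(max(len(a), len(b)) - 1)     # zero initial state (rest)
    y1, zi = signal.lfilter(b, a, x[:400], zi=zi)
    y2, _ = signal.lfilter(b, a, x[400:], zi=zi)
    assert np.allclose(np.concatenate([y1, y2]), y_ref)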
+ + Returns the quotient and remainder such that + ``signal = convolve(divisor, quotient) + remainder`` + + Parameters + ---------- + signal : (N,) array_like + Signal data, typically a recorded signal + divisor : (N,) array_like + Divisor data, typically an impulse response or filter that was + applied to the original signal + + Returns + ------- + quotient : ndarray + Quotient, typically the recovered original signal + remainder : ndarray + Remainder + + See Also + -------- + numpy.polydiv : performs polynomial division (same operation, but + also accepts poly1d objects) + + Examples + -------- + Deconvolve a signal that's been filtered: + + >>> from scipy import signal + >>> original = [0, 1, 0, 0, 1, 1, 0, 0] + >>> impulse_response = [2, 1] + >>> recorded = signal.convolve(impulse_response, original) + >>> recorded + array([0, 2, 1, 0, 2, 3, 1, 0, 0]) + >>> recovered, remainder = signal.deconvolve(recorded, impulse_response) + >>> recovered + array([ 0., 1., 0., 0., 1., 1., 0., 0.]) + + """ + num = np.atleast_1d(signal) + den = np.atleast_1d(divisor) + if num.ndim > 1: + raise ValueError("signal must be 1-D.") + if den.ndim > 1: + raise ValueError("divisor must be 1-D.") + N = len(num) + D = len(den) + if D > N: + quot = [] + rem = num + else: + input = np.zeros(N - D + 1, float) + input[0] = 1 + quot = lfilter(num, den, input) + rem = num - convolve(den, quot, mode='full') + return quot, rem + + +def hilbert(x, N=None, axis=-1): + r"""FFT-based computation of the analytic signal. + + The analytic signal is calculated by filtering out the negative frequencies and + doubling the amplitudes of the positive frequencies in the FFT domain. + The imaginary part of the result is the hilbert transform of the real-valued input + signal. + + The transformation is done along the last axis by default. + + Parameters + ---------- + x : array_like + Signal data. Must be real. + N : int, optional + Number of Fourier components. Default: ``x.shape[axis]`` + axis : int, optional + Axis along which to do the transformation. Default: -1. + + Returns + ------- + xa : ndarray + Analytic signal of `x`, of each 1-D array along `axis` + + Notes + ----- + The analytic signal ``x_a(t)`` of a real-valued signal ``x(t)`` + can be expressed as [1]_ + + .. math:: x_a = F^{-1}(F(x) 2U) = x + i y\ , + + where `F` is the Fourier transform, `U` the unit step function, + and `y` the Hilbert transform of `x`. [2]_ + + In other words, the negative half of the frequency spectrum is zeroed + out, turning the real-valued signal into a complex-valued signal. The Hilbert + transformed signal can be obtained from ``np.imag(hilbert(x))``, and the + original signal from ``np.real(hilbert(x))``. + + References + ---------- + .. [1] Wikipedia, "Analytic signal". + https://en.wikipedia.org/wiki/Analytic_signal + .. [2] Wikipedia, "Hilbert Transform". + https://en.wikipedia.org/wiki/Hilbert_transform + .. [3] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2. + .. [4] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal + Processing, Third Edition, 2009. Chapter 12. + ISBN 13: 978-1292-02572-8 + + See Also + -------- + envelope: Compute envelope of a real- or complex-valued signal. + + Examples + -------- + In this example we use the Hilbert transform to determine the amplitude + envelope and instantaneous frequency of an amplitude-modulated signal. 
+ + Let's create a chirp of which the frequency increases from 20 Hz to 100 Hz and + apply an amplitude modulation: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import hilbert, chirp + ... + >>> duration, fs = 1, 400 # 1 s signal with sampling frequency of 400 Hz + >>> t = np.arange(int(fs*duration)) / fs # timestamps of samples + >>> signal = chirp(t, 20.0, t[-1], 100.0) + >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) ) + + The amplitude envelope is given by the magnitude of the analytic signal. The + instantaneous frequency can be obtained by differentiating the + instantaneous phase in respect to time. The instantaneous phase corresponds + to the phase angle of the analytic signal. + + >>> analytic_signal = hilbert(signal) + >>> amplitude_envelope = np.abs(analytic_signal) + >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal)) + >>> instantaneous_frequency = np.diff(instantaneous_phase) / (2.0*np.pi) * fs + ... + >>> fig, (ax0, ax1) = plt.subplots(nrows=2, sharex='all', tight_layout=True) + >>> ax0.set_title("Amplitude-modulated Chirp Signal") + >>> ax0.set_ylabel("Amplitude") + >>> ax0.plot(t, signal, label='Signal') + >>> ax0.plot(t, amplitude_envelope, label='Envelope') + >>> ax0.legend() + >>> ax1.set(xlabel="Time in seconds", ylabel="Phase in rad", ylim=(0, 120)) + >>> ax1.plot(t[1:], instantaneous_frequency, 'C2-', label='Instantaneous Phase') + >>> ax1.legend() + >>> plt.show() + + """ + x = np.asarray(x) + if np.iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape[axis] + if N <= 0: + raise ValueError("N must be positive.") + + Xf = sp_fft.fft(x, N, axis=axis) + h = np.zeros(N, dtype=Xf.dtype) + if N % 2 == 0: + h[0] = h[N // 2] = 1 + h[1:N // 2] = 2 + else: + h[0] = 1 + h[1:(N + 1) // 2] = 2 + + if x.ndim > 1: + ind = [np.newaxis] * x.ndim + ind[axis] = slice(None) + h = h[tuple(ind)] + x = sp_fft.ifft(Xf * h, axis=axis) + return x + + +def hilbert2(x, N=None): + """ + Compute the '2-D' analytic signal of `x` + + Parameters + ---------- + x : array_like + 2-D signal data. + N : int or tuple of two ints, optional + Number of Fourier components. Default is ``x.shape`` + + Returns + ------- + xa : ndarray + Analytic signal of `x` taken along axes (0,1). + + References + ---------- + .. [1] Wikipedia, "Analytic signal", + https://en.wikipedia.org/wiki/Analytic_signal + + """ + x = np.atleast_2d(x) + if x.ndim > 2: + raise ValueError("x must be 2-D.") + if np.iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape + elif isinstance(N, int): + if N <= 0: + raise ValueError("N must be positive.") + N = (N, N) + elif len(N) != 2 or np.any(np.asarray(N) <= 0): + raise ValueError("When given as a tuple, N must hold exactly " + "two positive integers") + + Xf = sp_fft.fft2(x, N, axes=(0, 1)) + h1 = np.zeros(N[0], dtype=Xf.dtype) + h2 = np.zeros(N[1], dtype=Xf.dtype) + for h in (h1, h2): + N1 = h.shape[0] + if N1 % 2 == 0: + h[0] = h[N1 // 2] = 1 + h[1:N1 // 2] = 2 + else: + h[0] = 1 + h[1:(N1 + 1) // 2] = 2 + + h = h1[:, np.newaxis] * h2[np.newaxis, :] + k = x.ndim + while k > 2: + h = h[:, np.newaxis] + k -= 1 + x = sp_fft.ifft2(Xf * h, axes=(0, 1)) + return x + + +def envelope(z: np.ndarray, bp_in: tuple[int | None, int | None] = (1, None), *, + n_out: int | None = None, squared: bool = False, + residual: Literal['lowpass', 'all', None] = 'lowpass', + axis: int = -1) -> np.ndarray: + r"""Compute the envelope of a real- or complex-valued signal. 
+
+    Parameters
+    ----------
+    z : ndarray
+        Real- or complex-valued input signal, which is assumed to be made up of ``n``
+        samples and to have sampling interval ``T``. `z` may also be a
+        multidimensional array with the time axis being defined by `axis`.
+    bp_in : tuple[int | None, int | None], optional
+        2-tuple defining the frequency band ``bp_in[0]:bp_in[1]`` of the input filter.
+        The corner frequencies are specified as integer multiples of ``1/(n*T)`` with
+        ``-n//2 <= bp_in[0] < bp_in[1] <= (n+1)//2`` being the allowed frequency range.
+        ``None`` entries are replaced with ``-n//2`` or ``(n+1)//2`` respectively. The
+        default of ``(1, None)`` removes the mean value as well as the negative
+        frequency components.
+    n_out : int | None, optional
+        If not ``None``, the output will be resampled to `n_out` samples. The default
+        of ``None`` sets the output to the same length as the input `z`.
+    squared : bool, optional
+        If set, the square of the envelope is returned. The bandwidth of the squared
+        envelope is often smaller than the non-squared envelope bandwidth due to the
+        nonlinear nature of the utilized absolute value function, i.e., the embedded
+        square root function typically produces additional harmonics.
+        The default is ``False``.
+    residual : Literal['lowpass', 'all', None], optional
+        This option determines what kind of residual, i.e., the signal part which the
+        input bandpass filter removes, is returned. ``'all'`` returns everything except
+        the contents of the frequency band ``bp_in[0]:bp_in[1]``, ``'lowpass'``
+        returns the contents of the frequency band ``< bp_in[0]``. If ``None`` then
+        only the envelope is returned. Default: ``'lowpass'``.
+    axis : int, optional
+        Axis of `z` over which to compute the envelope. Default is the last axis.
+
+    Returns
+    -------
+    ndarray
+        If parameter `residual` is ``None`` then an array ``z_env`` with the same shape
+        as the input `z` is returned, containing its envelope. Otherwise, an array with
+        shape ``(2, *z.shape)``, containing the arrays ``z_env`` and ``z_res``, stacked
+        along the first axis, is returned.
+        It allows unpacking, i.e., ``z_env, z_res = envelope(z, residual='all')``.
+        The residual ``z_res`` contains the signal part which the input bandpass filter
+        removed, depending on the parameter `residual`. Note that for real-valued
+        signals, a real-valued residual is returned. Hence, the negative frequency
+        components of `bp_in` are ignored.
+
+    Notes
+    -----
+    Any complex-valued signal :math:`z(t)` can be described by a real-valued
+    instantaneous amplitude :math:`a(t)` and a real-valued instantaneous phase
+    :math:`\phi(t)`, i.e., :math:`z(t) = a(t) \exp\!\big(j \phi(t)\big)`. The
+    envelope is defined as the absolute value of the amplitude :math:`|a(t)| = |z(t)|`,
+    which is at the same time the absolute value of the signal. Hence, :math:`|a(t)|`
+    "envelopes" the class of all signals with amplitude :math:`a(t)` and arbitrary
+    phase :math:`\phi(t)`.
+    For real-valued signals, :math:`x(t) = a(t) \cos\!\big(\phi(t)\big)` is the
+    analogous formulation. Hence, :math:`|a(t)|` can be determined by converting
+    :math:`x(t)` into an analytic signal :math:`z_a(t)` by means of a Hilbert
+    transform, i.e.,
+    :math:`z_a(t) = a(t) \cos\!\big(\phi(t)\big) + j a(t) \sin\!\big(\phi(t) \big)`,
+    which produces a complex-valued signal with the same envelope :math:`|a(t)|`.
+
+    The implementation is based on computing the FFT of the input signal and then
+    performing the necessary operations in Fourier space.
Hence, the typical FFT + caveats need to be taken into account: + + * The signal is assumed to be periodic. Discontinuities between signal start and + end can lead to unwanted results due to Gibbs phenomenon. + * The FFT is slow if the signal length is prime or very long. Also, the memory + demands are typically higher than a comparable FIR/IIR filter based + implementation. + * The frequency spacing ``1 / (n*T)`` for corner frequencies of the bandpass filter + corresponds to the frequencies produced by ``scipy.fft.fftfreq(len(z), T)``. + + If the envelope of a complex-valued signal `z` with no bandpass filtering is + desired, i.e., ``bp_in=(None, None)``, then the envelope corresponds to the + absolute value. Hence, it is more efficient to use ``np.abs(z)`` instead of this + function. + + Although computing the envelope based on the analytic signal [1]_ is the natural + method for real-valued signals, other methods are also frequently used. The most + popular alternative is probably the so-called "square-law" envelope detector and + its relatives [2]_. They do not always compute the correct result for all kinds of + signals, but are usually correct and typically computationally more efficient for + most kinds of narrowband signals. The definition for an envelope presented here is + common where instantaneous amplitude and phase are of interest (e.g., as described + in [3]_). There exist also other concepts, which rely on the general mathematical + idea of an envelope [4]_: A pragmatic approach is to determine all upper and lower + signal peaks and use a spline interpolation to determine the curves [5]_. + + + References + ---------- + .. [1] "Analytic Signal", Wikipedia, + https://en.wikipedia.org/wiki/Analytic_signal + .. [2] Lyons, Richard, "Digital envelope detection: The good, the bad, and the + ugly", IEEE Signal Processing Magazine 34.4 (2017): 183-187. + `PDF `__ + .. [3] T.G. Kincaid, "The complex representation of signals.", + TIS R67# MH5, General Electric Co. (1966). + `PDF `__ + .. [4] "Envelope (mathematics)", Wikipedia, + https://en.wikipedia.org/wiki/Envelope_(mathematics) + .. [5] Yang, Yanli. "A signal theoretic approach for envelope analysis of + real-valued signals." IEEE Access 5 (2017): 5623-5630. + `PDF `__ + + + See Also + -------- + hilbert: Compute analytic signal by means of Hilbert transform. + + + Examples + -------- + The following plot illustrates the envelope of a signal with variable frequency and + a low-frequency drift. To separate the drift from the envelope, a 4 Hz highpass + filter is used. The low-pass residuum of the input bandpass filter is utilized to + determine an asymmetric upper and lower bound to enclose the signal. Due to the + smoothness of the resulting envelope, it is down-sampled from 500 to 40 samples. + Note that the instantaneous amplitude ``a_x`` and the computed envelope ``x_env`` + are not perfectly identical. This is due to the signal not being perfectly periodic + as well as the existence of some spectral overlapping of ``x_carrier`` and + ``x_drift``. Hence, they cannot be completely separated by a bandpass filter. + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.signal.windows import gaussian + >>> from scipy.signal import envelope + ... 
+ >>> n, n_out = 500, 40 # number of signal samples and envelope samples + >>> T = 2 / n # sampling interval for 2 s duration + >>> t = np.arange(n) * T # time stamps + >>> a_x = gaussian(len(t), 0.4/T) # instantaneous amplitude + >>> phi_x = 30*np.pi*t + 35*np.cos(2*np.pi*0.25*t) # instantaneous phase + >>> x_carrier = a_x * np.cos(phi_x) + >>> x_drift = 0.3 * gaussian(len(t), 0.4/T) # drift + >>> x = x_carrier + x_drift + ... + >>> bp_in = (int(4 * (n*T)), None) # 4 Hz highpass input filter + >>> x_env, x_res = envelope(x, bp_in, n_out=n_out) + >>> t_out = np.arange(n_out) * (n / n_out) * T + ... + >>> fg0, ax0 = plt.subplots(1, 1, tight_layout=True) + >>> ax0.set_title(r"$4\,$Hz Highpass Envelope of Drifting Signal") + >>> ax0.set(xlabel="Time in seconds", xlim=(0, n*T), ylabel="Amplitude") + >>> ax0.plot(t, x, 'C0-', alpha=0.5, label="Signal") + >>> ax0.plot(t, x_drift, 'C2--', alpha=0.25, label="Drift") + >>> ax0.plot(t_out, x_res+x_env, 'C1.-', alpha=0.5, label="Envelope") + >>> ax0.plot(t_out, x_res-x_env, 'C1.-', alpha=0.5, label=None) + >>> ax0.grid(True) + >>> ax0.legend() + >>> plt.show() + + The second example provides a geometric envelope interpretation of complex-valued + signals: The following two plots show the complex-valued signal as a blue + 3d-trajectory and the envelope as an orange round tube with varying diameter, i.e., + as :math:`|a(t)| \exp(j\rho(t))`, with :math:`\rho(t)\in[-\pi,\pi]`. Also, the + projection into the 2d real and imaginary coordinate planes of trajectory and tube + is depicted. Every point of the complex-valued signal touches the tube's surface. + + The left plot shows an analytic signal, i.e, the phase difference between + imaginary and real part is always 90 degrees, resulting in a spiraling trajectory. + It can be seen that in this case the real part has also the expected envelope, + i.e., representing the absolute value of the instantaneous amplitude. + + The right plot shows the real part of that analytic signal being interpreted + as a complex-vauled signal, i.e., having zero imaginary part. There the resulting + envelope is not as smooth as in the analytic case and the instantaneous amplitude + in the real plane is not recovered. If ``z_re`` had been passed as a real-valued + signal, i.e., as ``z_re = z.real`` instead of ``z_re = z.real + 0j``, the result + would have been identical to the left plot. The reason for this is that real-valued + signals are interpreted as being the real part of a complex-valued analytic signal. + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.signal.windows import gaussian + >>> from scipy.signal import envelope + ... + >>> n, T = 1000, 1/1000 # number of samples and sampling interval + >>> t = np.arange(n) * T # time stamps for 1 s duration + >>> f_c = 3 # Carrier frequency for signal + >>> z = gaussian(len(t), 0.3/T) * np.exp(2j*np.pi*f_c*t) # analytic signal + >>> z_re = z.real + 0j # complex signal with zero imaginary part + ... + >>> e_a, e_r = (envelope(z_, (None, None), residual=None) for z_ in (z, z_re)) + ... + >>> # Generate grids to visualize envelopes as 2d and 3d surfaces: + >>> E2d_t, E2_amp = np.meshgrid(t, [-1, 1]) + >>> E2d_1 = np.ones_like(E2_amp) + >>> E3d_t, E3d_phi = np.meshgrid(t, np.linspace(-np.pi, np.pi, 300)) + >>> ma = 1.8 # maximum axis values in real and imaginary direction + ... + >>> fg0 = plt.figure(figsize=(6.2, 4.)) + >>> ax00 = fg0.add_subplot(1, 2, 1, projection='3d') + >>> ax01 = fg0.add_subplot(1, 2, 2, projection='3d', sharex=ax00, + ... 
sharey=ax00, sharez=ax00) + >>> ax00.set_title("Analytic Signal") + >>> ax00.set(xlim=(0, 1), ylim=(-ma, ma), zlim=(-ma, ma)) + >>> ax01.set_title("Real-valued Signal") + >>> for z_, e_, ax_ in zip((z, z.real), (e_a, e_r), (ax00, ax01)): + ... ax_.set(xlabel="Time $t$", ylabel="Real Amp. $x(t)$", + ... zlabel="Imag. Amp. $y(t)$") + ... ax_.plot(t, z_.real, 'C0-', zs=-ma, zdir='z', alpha=0.5, label="Real") + ... ax_.plot_surface(E2d_t, e_*E2_amp, -ma*E2d_1, color='C1', alpha=0.25) + ... ax_.plot(t, z_.imag, 'C0-', zs=+ma, zdir='y', alpha=0.5, label="Imag.") + ... ax_.plot_surface(E2d_t, ma*E2d_1, e_*E2_amp, color='C1', alpha=0.25) + ... ax_.plot(t, z_.real, z_.imag, 'C0-', label="Signal") + ... ax_.plot_surface(E3d_t, e_*np.cos(E3d_phi), e_*np.sin(E3d_phi), + ... color='C1', alpha=0.5, shade=True, label="Envelope") + ... ax_.view_init(elev=22.7, azim=-114.3) + >>> fg0.subplots_adjust(left=0.08, right=0.97, wspace=0.15) + >>> plt.show() + """ + if not (-z.ndim <= axis < z.ndim): + raise ValueError(f"Invalid parameter {axis=} for {z.shape=}!") + if not (z.shape[axis] > 0): + raise ValueError(f"z.shape[axis] not > 0 for {z.shape=}, {axis=}!") + if len(bp_in) != 2 or not all((isinstance(b_, int) or b_ is None) for b_ in bp_in): + raise ValueError(f"{bp_in=} isn't a 2-tuple of type (int | None, int | None)!") + if not ((isinstance(n_out, int) and 0 < n_out) or n_out is None): + raise ValueError(f"{n_out=} is not a positive integer or None!") + if residual not in ('lowpass', 'all', None): + raise ValueError(f"{residual=} not in ['lowpass', 'all', None]!") + + n = z.shape[axis] # number of time samples of input + n_out = n if n_out is None else n_out + fak = n_out / n # scaling factor for resampling + + bp = slice(bp_in[0] if bp_in[0] is not None else -(n//2), + bp_in[1] if bp_in[1] is not None else (n+1)//2) + if not (-n//2 <= bp.start < bp.stop <= (n+1)//2): + raise ValueError("`-n//2 <= bp_in[0] < bp_in[1] <= (n+1)//2` does not hold " + + f"for n={z.shape[axis]=} and {bp_in=}!") + + # moving active axis to end allows to use `...` for indexing: + z = np.moveaxis(z, axis, -1) + + if np.iscomplexobj(z): + Z = sp_fft.fft(z) + else: # avoid calculating negative frequency bins for real signals: + Z = np.zeros_like(z, dtype=sp_fft.rfft(z.flat[:1]).dtype) + Z[..., :n//2 + 1] = sp_fft.rfft(z) + if bp.start > 0: # make signal analytic within bp_in band: + Z[..., bp] *= 2 + elif bp.stop > 0: + Z[..., 1:bp.stop] *= 2 + if not (bp.start <= 0 < bp.stop): # envelope is invariant to freq. shifts. + z_bb = sp_fft.ifft(Z[..., bp], n=n_out) * fak # baseband signal + else: + bp_shift = slice(bp.start + n//2, bp.stop + n//2) + z_bb = sp_fft.ifft(sp_fft.fftshift(Z, axes=-1)[..., bp_shift], n=n_out) * fak + + z_env = np.abs(z_bb) if not squared else z_bb.real ** 2 + z_bb.imag ** 2 + z_env = np.moveaxis(z_env, -1, axis) + + # Calculate the residual from the input bandpass filter: + if residual is None: + return z_env + if not (bp.start <= 0 < bp.stop): + Z[..., bp] = 0 + else: + Z[..., :bp.stop], Z[..., bp.start:] = 0, 0 + if residual == 'lowpass': + if bp.stop > 0: + Z[..., bp.stop:(n+1) // 2] = 0 + else: + Z[..., bp.start:], Z[..., 0:(n + 1) // 2] = 0, 0 + + z_res = fak * (sp_fft.ifft(Z, n=n_out) if np.iscomplexobj(z) else + sp_fft.irfft(Z, n=n_out)) + return np.stack((z_env, np.moveaxis(z_res, -1, axis)), axis=0) + +def _cmplx_sort(p): + """Sort roots based on magnitude. + + Parameters + ---------- + p : array_like + The roots to sort, as a 1-D array. + + Returns + ------- + p_sorted : ndarray + Sorted roots. 
+ indx : ndarray + Array of indices needed to sort the input `p`. + + Examples + -------- + >>> from scipy import signal + >>> vals = [1, 4, 1+1.j, 3] + >>> p_sorted, indx = signal.cmplx_sort(vals) + >>> p_sorted + array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j]) + >>> indx + array([0, 2, 3, 1]) + """ + p = np.asarray(p) + indx = np.argsort(abs(p)) + return np.take(p, indx, 0), indx + + +def unique_roots(p, tol=1e-3, rtype='min'): + """Determine unique roots and their multiplicities from a list of roots. + + Parameters + ---------- + p : array_like + The list of roots. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. Refer to Notes about + the details on roots grouping. + rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional + How to determine the returned root if multiple roots are within + `tol` of each other. + + - 'max', 'maximum': pick the maximum of those roots + - 'min', 'minimum': pick the minimum of those roots + - 'avg', 'mean': take the average of those roots + + When finding minimum or maximum among complex roots they are compared + first by the real part and then by the imaginary part. + + Returns + ------- + unique : ndarray + The list of unique roots. + multiplicity : ndarray + The multiplicity of each root. + + Notes + ----- + If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to + ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it + doesn't necessarily mean that ``a`` is close to ``c``. It means that roots + grouping is not unique. In this function we use "greedy" grouping going + through the roots in the order they are given in the input `p`. + + This utility function is not specific to roots but can be used for any + sequence of values for which uniqueness and multiplicity has to be + determined. For a more general routine, see `numpy.unique`. + + Examples + -------- + >>> from scipy import signal + >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3] + >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg') + + Check which roots have multiplicity larger than 1: + + >>> uniq[mult > 1] + array([ 1.305]) + """ + if rtype in ['max', 'maximum']: + reduce = np.max + elif rtype in ['min', 'minimum']: + reduce = np.min + elif rtype in ['avg', 'mean']: + reduce = np.mean + else: + raise ValueError("`rtype` must be one of " + "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") + + p = np.asarray(p) + + points = np.empty((len(p), 2)) + points[:, 0] = np.real(p) + points[:, 1] = np.imag(p) + tree = cKDTree(points) + + p_unique = [] + p_multiplicity = [] + used = np.zeros(len(p), dtype=bool) + for i in range(len(p)): + if used[i]: + continue + + group = tree.query_ball_point(points[i], tol) + group = [x for x in group if not used[x]] + + p_unique.append(reduce(p[group])) + p_multiplicity.append(len(group)) + + used[group] = True + + return np.asarray(p_unique), np.asarray(p_multiplicity) + + +def invres(r, p, k, tol=1e-3, rtype='avg'): + """Compute b(s) and a(s) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] + H(s) = ------ = ------------------------------------------ + a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] + + then the partial-fraction expansion H(s) is defined as:: + + r[0] r[1] r[-1] + = -------- + -------- + ... 
+ --------- + k(s) + (s-p[0]) (s-p[1]) (s-p[-1]) + + If there are any repeated roots (closer together than `tol`), then H(s) + has terms like:: + + r[i] r[i+1] r[i+n-1] + -------- + ----------- + ... + ----------- + (s-p[i]) (s-p[i])**2 (s-p[i])**n + + This function is used for polynomials in positive powers of s or z, + such as analog filters or digital filters in controls engineering. For + negative powers of z (typical for digital filters in DSP), use `invresz`. + + Parameters + ---------- + r : array_like + Residues corresponding to the poles. For repeated poles, the residues + must be ordered to correspond to ascending by power fractions. + p : array_like + Poles. Equal poles must be adjacent. + k : array_like + Coefficients of the direct polynomial term. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + residue, invresz, unique_roots + + """ + r = np.atleast_1d(r) + p = np.atleast_1d(p) + k = np.trim_zeros(np.atleast_1d(k), 'f') + + unique_poles, multiplicity = _group_poles(p, tol, rtype) + factors, denominator = _compute_factors(unique_poles, multiplicity, + include_powers=True) + + if len(k) == 0: + numerator = 0 + else: + numerator = np.polymul(k, denominator) + + for residue, factor in zip(r, factors): + numerator = np.polyadd(numerator, residue * factor) + + return numerator, denominator + + +def _compute_factors(roots, multiplicity, include_powers=False): + """Compute the total polynomial divided by factors for each root.""" + current = np.array([1]) + suffixes = [current] + for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]): + monomial = np.array([1, -pole]) + for _ in range(mult): + current = np.polymul(current, monomial) + suffixes.append(current) + suffixes = suffixes[::-1] + + factors = [] + current = np.array([1]) + for pole, mult, suffix in zip(roots, multiplicity, suffixes): + monomial = np.array([1, -pole]) + block = [] + for i in range(mult): + if i == 0 or include_powers: + block.append(np.polymul(current, suffix)) + current = np.polymul(current, monomial) + factors.extend(reversed(block)) + + return factors, current + + +def _compute_residues(poles, multiplicity, numerator): + denominator_factors, _ = _compute_factors(poles, multiplicity) + numerator = numerator.astype(poles.dtype) + + residues = [] + for pole, mult, factor in zip(poles, multiplicity, + denominator_factors): + if mult == 1: + residues.append(np.polyval(numerator, pole) / + np.polyval(factor, pole)) + else: + numer = numerator.copy() + monomial = np.array([1, -pole]) + factor, d = np.polydiv(factor, monomial) + + block = [] + for _ in range(mult): + numer, n = np.polydiv(numer, monomial) + r = n[0] / d[0] + numer = np.polysub(numer, r * factor) + block.append(r) + + residues.extend(reversed(block)) + + return np.asarray(residues) + + +def residue(b, a, tol=1e-3, rtype='avg'): + """Compute partial-fraction expansion of b(s) / a(s). + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(s) b[0] s**(M) + b[1] s**(M-1) + ... 
+ b[M] + H(s) = ------ = ------------------------------------------ + a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] + + then the partial-fraction expansion H(s) is defined as:: + + r[0] r[1] r[-1] + = -------- + -------- + ... + --------- + k(s) + (s-p[0]) (s-p[1]) (s-p[-1]) + + If there are any repeated roots (closer together than `tol`), then H(s) + has terms like:: + + r[i] r[i+1] r[i+n-1] + -------- + ----------- + ... + ----------- + (s-p[i]) (s-p[i])**2 (s-p[i])**n + + This function is used for polynomials in positive powers of s or z, + such as analog filters or digital filters in controls engineering. For + negative powers of z (typical for digital filters in DSP), use `residuez`. + + See Notes for details about the algorithm. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + r : ndarray + Residues corresponding to the poles. For repeated poles, the residues + are ordered to correspond to ascending by power fractions. + p : ndarray + Poles ordered by magnitude in ascending order. + k : ndarray + Coefficients of the direct polynomial term. + + See Also + -------- + invres, residuez, numpy.poly, unique_roots + + Notes + ----- + The "deflation through subtraction" algorithm is used for + computations --- method 6 in [1]_. + + The form of partial fraction expansion depends on poles multiplicity in + the exact mathematical sense. However there is no way to exactly + determine multiplicity of roots of a polynomial in numerical computing. + Thus you should think of the result of `residue` with given `tol` as + partial fraction expansion computed for the denominator composed of the + computed poles with empirically determined multiplicity. The choice of + `tol` can drastically change the result if there are close poles. + + References + ---------- + .. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a + review of computational methodology and efficiency", Journal of + Computational and Applied Mathematics, Vol. 9, 1983. + """ + b = np.asarray(b) + a = np.asarray(a) + if (np.issubdtype(b.dtype, np.complexfloating) + or np.issubdtype(a.dtype, np.complexfloating)): + b = b.astype(complex) + a = a.astype(complex) + else: + b = b.astype(float) + a = a.astype(float) + + b = np.trim_zeros(np.atleast_1d(b), 'f') + a = np.trim_zeros(np.atleast_1d(a), 'f') + + if a.size == 0: + raise ValueError("Denominator `a` is zero.") + + poles = np.roots(a) + if b.size == 0: + return np.zeros(poles.shape), _cmplx_sort(poles)[0], np.array([]) + + if len(b) < len(a): + k = np.empty(0) + else: + k, b = np.polydiv(b, a) + + unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) + unique_poles, order = _cmplx_sort(unique_poles) + multiplicity = multiplicity[order] + + residues = _compute_residues(unique_poles, multiplicity, b) + + index = 0 + for pole, mult in zip(unique_poles, multiplicity): + poles[index:index + mult] = pole + index += mult + + return residues / a[0], poles, k + + +def residuez(b, a, tol=1e-3, rtype='avg'): + """Compute partial-fraction expansion of b(z) / a(z). 
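+
+    A quick round-trip sketch (assuming simple, well-separated poles): the
+    expansion returned by `residuez` can be inverted again with `invresz`:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> b, a = [1.0, -1.0], [1.0, -1.5, 0.56]
+    >>> r, p, k = signal.residuez(b, a)
+    >>> b2, a2 = signal.invresz(r, p, k)
+    >>> bool(np.allclose(b2, b) and np.allclose(a2, a))
+    True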
+ + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) + H(z) = ------ = ------------------------------------------ + a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) + + then the partial-fraction expansion H(z) is defined as:: + + r[0] r[-1] + = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... + (1-p[0]z**(-1)) (1-p[-1]z**(-1)) + + If there are any repeated roots (closer than `tol`), then the partial + fraction expansion has terms like:: + + r[i] r[i+1] r[i+n-1] + -------------- + ------------------ + ... + ------------------ + (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n + + This function is used for polynomials in negative powers of z, + such as digital filters in DSP. For positive powers, use `residue`. + + See Notes of `residue` for details about the algorithm. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + r : ndarray + Residues corresponding to the poles. For repeated poles, the residues + are ordered to correspond to ascending by power fractions. + p : ndarray + Poles ordered by magnitude in ascending order. + k : ndarray + Coefficients of the direct polynomial term. + + See Also + -------- + invresz, residue, unique_roots + """ + b = np.asarray(b) + a = np.asarray(a) + if (np.issubdtype(b.dtype, np.complexfloating) + or np.issubdtype(a.dtype, np.complexfloating)): + b = b.astype(complex) + a = a.astype(complex) + else: + b = b.astype(float) + a = a.astype(float) + + b = np.trim_zeros(np.atleast_1d(b), 'b') + a = np.trim_zeros(np.atleast_1d(a), 'b') + + if a.size == 0: + raise ValueError("Denominator `a` is zero.") + elif a[0] == 0: + raise ValueError("First coefficient of determinant `a` must be " + "non-zero.") + + poles = np.roots(a) + if b.size == 0: + return np.zeros(poles.shape), _cmplx_sort(poles)[0], np.array([]) + + b_rev = b[::-1] + a_rev = a[::-1] + + if len(b_rev) < len(a_rev): + k_rev = np.empty(0) + else: + k_rev, b_rev = np.polydiv(b_rev, a_rev) + + unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) + unique_poles, order = _cmplx_sort(unique_poles) + multiplicity = multiplicity[order] + + residues = _compute_residues(1 / unique_poles, multiplicity, b_rev) + + index = 0 + powers = np.empty(len(residues), dtype=int) + for pole, mult in zip(unique_poles, multiplicity): + poles[index:index + mult] = pole + powers[index:index + mult] = 1 + np.arange(mult) + index += mult + + residues *= (-poles) ** powers / a_rev[0] + + return residues, poles, k_rev[::-1] + + +def _group_poles(poles, tol, rtype): + if rtype in ['max', 'maximum']: + reduce = np.max + elif rtype in ['min', 'minimum']: + reduce = np.min + elif rtype in ['avg', 'mean']: + reduce = np.mean + else: + raise ValueError("`rtype` must be one of " + "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") + + unique = [] + multiplicity = [] + + pole = poles[0] + block = [pole] + for i in range(1, len(poles)): + if abs(poles[i] - pole) <= tol: + block.append(pole) + else: + unique.append(reduce(block)) + multiplicity.append(len(block)) + pole 
= poles[i] + block = [pole] + + unique.append(reduce(block)) + multiplicity.append(len(block)) + + return np.asarray(unique), np.asarray(multiplicity) + + +def invresz(r, p, k, tol=1e-3, rtype='avg'): + """Compute b(z) and a(z) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) + H(z) = ------ = ------------------------------------------ + a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) + + then the partial-fraction expansion H(z) is defined as:: + + r[0] r[-1] + = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... + (1-p[0]z**(-1)) (1-p[-1]z**(-1)) + + If there are any repeated roots (closer than `tol`), then the partial + fraction expansion has terms like:: + + r[i] r[i+1] r[i+n-1] + -------------- + ------------------ + ... + ------------------ + (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n + + This function is used for polynomials in negative powers of z, + such as digital filters in DSP. For positive powers, use `invres`. + + Parameters + ---------- + r : array_like + Residues corresponding to the poles. For repeated poles, the residues + must be ordered to correspond to ascending by power fractions. + p : array_like + Poles. Equal poles must be adjacent. + k : array_like + Coefficients of the direct polynomial term. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + residuez, unique_roots, invres + + """ + r = np.atleast_1d(r) + p = np.atleast_1d(p) + k = np.trim_zeros(np.atleast_1d(k), 'b') + + unique_poles, multiplicity = _group_poles(p, tol, rtype) + factors, denominator = _compute_factors(unique_poles, multiplicity, + include_powers=True) + + if len(k) == 0: + numerator = 0 + else: + numerator = np.polymul(k[::-1], denominator[::-1]) + + for residue, factor in zip(r, factors): + numerator = np.polyadd(numerator, residue * factor[::-1]) + + return numerator[::-1], denominator + + +def resample(x, num, t=None, axis=0, window=None, domain='time'): + """ + Resample `x` to `num` samples using Fourier method along the given axis. + + The resampled signal starts at the same value as `x` but is sampled + with a spacing of ``len(x) / num * (spacing of x)``. Because a + Fourier method is used, the signal is assumed to be periodic. + + Parameters + ---------- + x : array_like + The data to be resampled. + num : int + The number of samples in the resampled signal. + t : array_like, optional + If `t` is given, it is assumed to be the equally spaced sample + positions associated with the signal data in `x`. + axis : int, optional + The axis of `x` that is resampled. Default is 0. + window : array_like, callable, string, float, or tuple, optional + Specifies the window applied to the signal in the Fourier + domain. See below for details. + domain : string, optional + A string indicating the domain of the input `x`: + ``time`` Consider the input `x` as time-domain (Default), + ``freq`` Consider the input `x` as frequency-domain. 
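+
+        For instance (a small sketch, assuming the full complex spectrum of a
+        real-valued signal is passed in), resampling a precomputed spectrum
+        with ``domain='freq'`` should match resampling the time-domain signal
+        directly:
+
+        >>> import numpy as np
+        >>> import scipy.fft
+        >>> from scipy.signal import resample
+        >>> x = np.cos(2*np.pi*3*np.linspace(0, 1, 20, endpoint=False))
+        >>> y1 = resample(x, 40)
+        >>> y2 = resample(scipy.fft.fft(x), 40, domain='freq')
+        >>> bool(np.allclose(y1, y2))
+        True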
+ + Returns + ------- + resampled_x or (resampled_x, resampled_t) + Either the resampled array, or, if `t` was given, a tuple + containing the resampled array and the corresponding resampled + positions. + + See Also + -------- + decimate : Downsample the signal after applying an FIR or IIR filter. + resample_poly : Resample using polyphase filtering and an FIR filter. + + Notes + ----- + The argument `window` controls a Fourier-domain window that tapers + the Fourier spectrum before zero-padding to alleviate ringing in + the resampled values for sampled signals you didn't intend to be + interpreted as band-limited. + + If `window` is a function, then it is called with a vector of inputs + indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ). + + If `window` is an array of the same length as `x.shape[axis]` it is + assumed to be the window to be applied directly in the Fourier + domain (with dc and low-frequency first). + + For any other type of `window`, the function `scipy.signal.get_window` + is called to generate the window. + + The first sample of the returned vector is the same as the first + sample of the input vector. The spacing between samples is changed + from ``dx`` to ``dx * len(x) / num``. + + If `t` is not None, then it is used solely to calculate the resampled + positions `resampled_t` + + As noted, `resample` uses FFT transformations, which can be very + slow if the number of input or output samples is large and prime; + see :func:`~scipy.fft.fft`. In such cases, it can be faster to first downsample + a signal of length ``n`` with :func:`~scipy.signal.resample_poly` by a factor of + ``n//num`` before using `resample`. Note that this approach changes the + characteristics of the antialiasing filter. + + Examples + -------- + Note that the end of the resampled data rises to meet the first + sample of the next cycle: + + >>> import numpy as np + >>> from scipy import signal + + >>> x = np.linspace(0, 10, 20, endpoint=False) + >>> y = np.cos(-x**2/6.0) + >>> f = signal.resample(y, 100) + >>> xnew = np.linspace(0, 10, 100, endpoint=False) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro') + >>> plt.legend(['data', 'resampled'], loc='best') + >>> plt.show() + + Consider the following signal ``y`` where ``len(y)`` is a large prime number: + + >>> N = 55949 + >>> freq = 100 + >>> x = np.linspace(0, 1, N) + >>> y = np.cos(2 * np.pi * freq * x) + + Due to ``N`` being prime, + + >>> num = 5000 + >>> f = signal.resample(signal.resample_poly(y, 1, N // num), num) + + runs significantly faster than + + >>> f = signal.resample(y, num) + """ + + if domain not in ('time', 'freq'): + raise ValueError("Acceptable domain flags are 'time' or" + f" 'freq', not domain={domain}") + + x = np.asarray(x) + Nx = x.shape[axis] + + # Check if we can use faster real FFT + real_input = np.isrealobj(x) + + if domain == 'time': + # Forward transform + if real_input: + X = sp_fft.rfft(x, axis=axis) + else: # Full complex FFT + X = sp_fft.fft(x, axis=axis) + else: # domain == 'freq' + X = x + + # Apply window to spectrum + if window is not None: + if callable(window): + W = window(sp_fft.fftfreq(Nx)) + elif isinstance(window, np.ndarray): + if window.shape != (Nx,): + raise ValueError('window must have the same length as data') + W = window + else: + W = sp_fft.ifftshift(get_window(window, Nx)) + + newshape_W = [1] * x.ndim + newshape_W[axis] = X.shape[axis] + if real_input: + # Fold the window back on itself to mimic complex behavior + W_real = 
W.copy() + W_real[1:] += W_real[-1:0:-1] + W_real[1:] *= 0.5 + X *= W_real[:newshape_W[axis]].reshape(newshape_W) + else: + X *= W.reshape(newshape_W) + + # Copy each half of the original spectrum to the output spectrum, either + # truncating high frequencies (downsampling) or zero-padding them + # (upsampling) + + # Placeholder array for output spectrum + newshape = list(x.shape) + if real_input: + newshape[axis] = num // 2 + 1 + else: + newshape[axis] = num + Y = np.zeros(newshape, X.dtype) + + # Copy positive frequency components (and Nyquist, if present) + N = min(num, Nx) + nyq = N // 2 + 1 # Slice index that includes Nyquist if present + sl = [slice(None)] * x.ndim + sl[axis] = slice(0, nyq) + Y[tuple(sl)] = X[tuple(sl)] + if not real_input: + # Copy negative frequency components + if N > 2: # (slice expression doesn't collapse to empty array) + sl[axis] = slice(nyq - N, None) + Y[tuple(sl)] = X[tuple(sl)] + + # Split/join Nyquist component(s) if present + # So far we have set Y[+N/2]=X[+N/2] + if N % 2 == 0: + if num < Nx: # downsampling + if real_input: + sl[axis] = slice(N//2, N//2 + 1) + Y[tuple(sl)] *= 2. + else: + # select the component of Y at frequency +N/2, + # add the component of X at -N/2 + sl[axis] = slice(-N//2, -N//2 + 1) + Y[tuple(sl)] += X[tuple(sl)] + elif Nx < num: # upsampling + # select the component at frequency +N/2 and halve it + sl[axis] = slice(N//2, N//2 + 1) + Y[tuple(sl)] *= 0.5 + if not real_input: + temp = Y[tuple(sl)] + # set the component at -N/2 equal to the component at +N/2 + sl[axis] = slice(num-N//2, num-N//2 + 1) + Y[tuple(sl)] = temp + + # Inverse transform + if real_input: + y = sp_fft.irfft(Y, num, axis=axis) + else: + y = sp_fft.ifft(Y, axis=axis, overwrite_x=True) + + y *= (float(num) / float(Nx)) + + if t is None: + return y + else: + new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0] + return y, new_t + + +def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0), + padtype='constant', cval=None): + """ + Resample `x` along the given axis using polyphase filtering. + + The signal `x` is upsampled by the factor `up`, a zero-phase low-pass + FIR filter is applied, and then it is downsampled by the factor `down`. + The resulting sample rate is ``up / down`` times the original sample + rate. By default, values beyond the boundary of the signal are assumed + to be zero during the filtering step. + + Parameters + ---------- + x : array_like + The data to be resampled. + up : int + The upsampling factor. + down : int + The downsampling factor. + axis : int, optional + The axis of `x` that is resampled. Default is 0. + window : string, tuple, or array_like, optional + Desired window to use to design the low-pass filter, or the FIR filter + coefficients to employ. See below for details. + padtype : string, optional + `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of + the other signal extension modes supported by `scipy.signal.upfirdn`. + Changes assumptions on values beyond the boundary. If `constant`, + assumed to be `cval` (default zero). If `line` assumed to continue a + linear trend defined by the first and last points. `mean`, `median`, + `maximum` and `minimum` work as in `np.pad` and assume that the values + beyond the boundary are the mean, median, maximum or minimum + respectively of the array along the axis. + + .. versionadded:: 1.4.0 + cval : float, optional + Value to use if `padtype='constant'`. Default is zero. + + .. 
versionadded:: 1.4.0 + + Returns + ------- + resampled_x : array + The resampled array. + + See Also + -------- + decimate : Downsample the signal after applying an FIR or IIR filter. + resample : Resample up or down using the FFT method. + + Notes + ----- + This polyphase method will likely be faster than the Fourier method + in `scipy.signal.resample` when the number of samples is large and + prime, or when the number of samples is large and `up` and `down` + share a large greatest common denominator. The length of the FIR + filter used will depend on ``max(up, down) // gcd(up, down)``, and + the number of operations during polyphase filtering will depend on + the filter length and `down` (see `scipy.signal.upfirdn` for details). + + The argument `window` specifies the FIR low-pass filter design. + + If `window` is an array_like it is assumed to be the FIR filter + coefficients. Note that the FIR filter is applied after the upsampling + step, so it should be designed to operate on a signal at a sampling + frequency higher than the original by a factor of `up//gcd(up, down)`. + This function's output will be centered with respect to this array, so it + is best to pass a symmetric filter with an odd number of samples if, as + is usually the case, a zero-phase filter is desired. + + For any other type of `window`, the functions `scipy.signal.get_window` + and `scipy.signal.firwin` are called to generate the appropriate filter + coefficients. + + The first sample of the returned vector is the same as the first + sample of the input vector. The spacing between samples is changed + from ``dx`` to ``dx * down / float(up)``. + + Examples + -------- + By default, the end of the resampled data rises to meet the first + sample of the next cycle for the FFT method, and gets closer to zero + for the polyphase method: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(0, 10, 20, endpoint=False) + >>> y = np.cos(-x**2/6.0) + >>> f_fft = signal.resample(y, 100) + >>> f_poly = signal.resample_poly(y, 100, 20) + >>> xnew = np.linspace(0, 10, 100, endpoint=False) + + >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-') + >>> plt.plot(x, y, 'ko-') + >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries + >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best') + >>> plt.show() + + This default behaviour can be changed by using the padtype option: + + >>> N = 5 + >>> x = np.linspace(0, 1, N, endpoint=False) + >>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x) + >>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x) + >>> Y = np.stack([y, y2], axis=-1) + >>> up = 4 + >>> xr = np.linspace(0, 1, N*up, endpoint=False) + + >>> y2 = signal.resample_poly(Y, up, 1, padtype='constant') + >>> y3 = signal.resample_poly(Y, up, 1, padtype='mean') + >>> y4 = signal.resample_poly(Y, up, 1, padtype='line') + + >>> for i in [0,1]: + ... plt.figure() + ... plt.plot(xr, y4[:,i], 'g.', label='line') + ... plt.plot(xr, y3[:,i], 'y.', label='mean') + ... plt.plot(xr, y2[:,i], 'r.', label='constant') + ... plt.plot(x, Y[:,i], 'k-') + ... 
plt.legend() + >>> plt.show() + + """ + x = np.asarray(x) + if up != int(up): + raise ValueError("up must be an integer") + if down != int(down): + raise ValueError("down must be an integer") + up = int(up) + down = int(down) + if up < 1 or down < 1: + raise ValueError('up and down must be >= 1') + if cval is not None and padtype != 'constant': + raise ValueError('cval has no effect when padtype is ', padtype) + + # Determine our up and down factors + # Use a rational approximation to save computation time on really long + # signals + g_ = math.gcd(up, down) + up //= g_ + down //= g_ + if up == down == 1: + return x.copy() + n_in = x.shape[axis] + n_out = n_in * up + n_out = n_out // down + bool(n_out % down) + + if isinstance(window, (list | np.ndarray)): + window = np.array(window) # use array to force a copy (we modify it) + if window.ndim > 1: + raise ValueError('window must be 1-D') + half_len = (window.size - 1) // 2 + h = window + else: + # Design a linear-phase low-pass FIR filter + max_rate = max(up, down) + f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist) + half_len = 10 * max_rate # reasonable cutoff for sinc-like function + if np.issubdtype(x.dtype, np.floating): + h = firwin(2 * half_len + 1, f_c, + window=window).astype(x.dtype) # match dtype of x + else: + h = firwin(2 * half_len + 1, f_c, + window=window) + h *= up + + # Zero-pad our filter to put the output samples at the center + n_pre_pad = (down - half_len % down) + n_post_pad = 0 + n_pre_remove = (half_len + n_pre_pad) // down + # We should rarely need to do this given our filter lengths... + while _output_len(len(h) + n_pre_pad + n_post_pad, n_in, + up, down) < n_out + n_pre_remove: + n_post_pad += 1 + h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h, + np.zeros(n_post_pad, dtype=h.dtype))) + n_pre_remove_end = n_pre_remove + n_out + + # Remove background depending on the padtype option + funcs = {'mean': np.mean, 'median': np.median, + 'minimum': np.amin, 'maximum': np.amax} + upfirdn_kwargs = {'mode': 'constant', 'cval': 0} + if padtype in funcs: + background_values = funcs[padtype](x, axis=axis, keepdims=True) + elif padtype in _upfirdn_modes: + upfirdn_kwargs = {'mode': padtype} + if padtype == 'constant': + if cval is None: + cval = 0 + upfirdn_kwargs['cval'] = cval + else: + raise ValueError( + 'padtype must be one of: maximum, mean, median, minimum, ' + + ', '.join(_upfirdn_modes)) + + if padtype in funcs: + x = x - background_values + + # filter then remove excess + y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs) + keep = [slice(None), ]*x.ndim + keep[axis] = slice(n_pre_remove, n_pre_remove_end) + y_keep = y[tuple(keep)] + + # Add background back + if padtype in funcs: + y_keep += background_values + + return y_keep + + +def vectorstrength(events, period): + ''' + Determine the vector strength of the events corresponding to the given + period. + + The vector strength is a measure of phase synchrony, how well the + timing of the events is synchronized to a single period of a periodic + signal. + + If multiple periods are used, calculate the vector strength of each. + This is called the "resonating vector strength". + + Parameters + ---------- + events : 1D array_like + An array of time points containing the timing of the events. + period : float or array_like + The period of the signal that the events should synchronize to. + The period is in the same units as `events`. It can also be an array + of periods, in which case the outputs are arrays of the same length. 
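+
+        For example (a small sketch), probing perfectly periodic events at the
+        true period and at twice that period yields strengths near 1 and 0,
+        respectively:
+
+        >>> import numpy as np
+        >>> from scipy.signal import vectorstrength
+        >>> events = np.array([0.0, 1.1, 2.2, 3.3])
+        >>> strength, phase = vectorstrength(events, [1.1, 2.2])
+        >>> bool(np.isclose(strength[0], 1.0)), bool(strength[1] < 1e-8)
+        (True, True)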
+ + Returns + ------- + strength : float or 1D array + The strength of the synchronization. 1.0 is perfect synchronization + and 0.0 is no synchronization. If `period` is an array, this is also + an array with each element containing the vector strength at the + corresponding period. + phase : float or array + The phase that the events are most strongly synchronized to in radians. + If `period` is an array, this is also an array with each element + containing the phase for the corresponding period. + + References + ---------- + van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector + strength: Auditory system, electric fish, and noise. + Chaos 21, 047508 (2011); + :doi:`10.1063/1.3670512`. + van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises: + biological and mathematical perspectives. Biol Cybern. + 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`. + van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens + when we vary the "probing" frequency while keeping the spike times + fixed. Biol Cybern. 2013 Aug;107(4):491-94. + :doi:`10.1007/s00422-013-0560-8`. + ''' + events = np.asarray(events) + period = np.asarray(period) + if events.ndim > 1: + raise ValueError('events cannot have dimensions more than 1') + if period.ndim > 1: + raise ValueError('period cannot have dimensions more than 1') + + # we need to know later if period was originally a scalar + scalarperiod = not period.ndim + + events = np.atleast_2d(events) + period = np.atleast_2d(period) + if (period <= 0).any(): + raise ValueError('periods must be positive') + + # this converts the times to vectors + vectors = np.exp(np.dot(2j*np.pi/period.T, events)) + + # the vector strength is just the magnitude of the mean of the vectors + # the vector phase is the angle of the mean of the vectors + vectormean = np.mean(vectors, axis=1) + strength = abs(vectormean) + phase = np.angle(vectormean) + + # if the original period was a scalar, return scalars + if scalarperiod: + strength = strength[0] + phase = phase[0] + return strength, phase + + +def detrend(data: np.ndarray, axis: int = -1, + type: Literal['linear', 'constant'] = 'linear', + bp: ArrayLike | int = 0, overwrite_data: bool = False) -> np.ndarray: + r"""Remove linear or constant trend along axis from data. + + Parameters + ---------- + data : array_like + The input data. + axis : int, optional + The axis along which to detrend the data. By default this is the + last axis (-1). + type : {'linear', 'constant'}, optional + The type of detrending. If ``type == 'linear'`` (default), + the result of a linear least-squares fit to `data` is subtracted + from `data`. + If ``type == 'constant'``, only the mean of `data` is subtracted. + bp : array_like of ints, optional + A sequence of break points. If given, an individual linear fit is + performed for each part of `data` between two break points. + Break points are specified as indices into `data`. This parameter + only has an effect when ``type == 'linear'``. + overwrite_data : bool, optional + If True, perform in place detrending and avoid a copy. Default is False + + Returns + ------- + ret : ndarray + The detrended input data. + + Notes + ----- + Detrending can be interpreted as subtracting a least squares fit polynomial: + Setting the parameter `type` to 'constant' corresponds to fitting a zeroth degree + polynomial, 'linear' to a first degree polynomial. Consult the example below. 
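+
+    The `bp` parameter splits the linear fit into pieces (a small additional
+    sketch): a signal that is exactly piecewise linear is reduced to numerical
+    zero once the break point is supplied:
+
+    >>> import numpy as np
+    >>> from scipy.signal import detrend
+    >>> x = np.concatenate([np.arange(5.), 5. - np.arange(5.)])
+    >>> bool(np.allclose(detrend(x, bp=[5]), 0.0, atol=1e-10))
+    True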
+ + See Also + -------- + numpy.polynomial.polynomial.Polynomial.fit: Create least squares fit polynomial. + + + Examples + -------- + The following example detrends the function :math:`x(t) = \sin(\pi t) + 1/4`: + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.signal import detrend + ... + >>> t = np.linspace(-0.5, 0.5, 21) + >>> x = np.sin(np.pi*t) + 1/4 + ... + >>> x_d_const = detrend(x, type='constant') + >>> x_d_linear = detrend(x, type='linear') + ... + >>> fig1, ax1 = plt.subplots() + >>> ax1.set_title(r"Detrending $x(t)=\sin(\pi t) + 1/4$") + >>> ax1.set(xlabel="t", ylabel="$x(t)$", xlim=(t[0], t[-1])) + >>> ax1.axhline(y=0, color='black', linewidth=.5) + >>> ax1.axvline(x=0, color='black', linewidth=.5) + >>> ax1.plot(t, x, 'C0.-', label="No detrending") + >>> ax1.plot(t, x_d_const, 'C1x-', label="type='constant'") + >>> ax1.plot(t, x_d_linear, 'C2+-', label="type='linear'") + >>> ax1.legend() + >>> plt.show() + + Alternatively, NumPy's `~numpy.polynomial.polynomial.Polynomial` can be used for + detrending as well: + + >>> pp0 = np.polynomial.Polynomial.fit(t, x, deg=0) # fit degree 0 polynomial + >>> np.allclose(x_d_const, x - pp0(t)) # compare with constant detrend + True + >>> pp1 = np.polynomial.Polynomial.fit(t, x, deg=1) # fit degree 1 polynomial + >>> np.allclose(x_d_linear, x - pp1(t)) # compare with linear detrend + True + + Note that `~numpy.polynomial.polynomial.Polynomial` also allows fitting higher + degree polynomials. Consult its documentation on how to extract the polynomial + coefficients. + """ + if type not in ['linear', 'l', 'constant', 'c']: + raise ValueError("Trend type must be 'linear' or 'constant'.") + data = np.asarray(data) + dtype = data.dtype.char + if dtype not in 'dfDF': + dtype = 'd' + if type in ['constant', 'c']: + ret = data - np.mean(data, axis, keepdims=True) + return ret + else: + dshape = data.shape + N = dshape[axis] + bp = np.sort(np.unique(np.concatenate(np.atleast_1d(0, bp, N)))) + if np.any(bp > N): + raise ValueError("Breakpoints must be less than length " + "of data along given axis.") + + # Restructure data so that axis is along first dimension and + # all other dimensions are collapsed into second dimension + rnk = len(dshape) + if axis < 0: + axis = axis + rnk + newdata = np.moveaxis(data, axis, 0) + newdata_shape = newdata.shape + newdata = newdata.reshape(N, -1) + + if not overwrite_data: + newdata = newdata.copy() # make sure we have a copy + if newdata.dtype.char not in 'dfDF': + newdata = newdata.astype(dtype) + +# Nreg = len(bp) - 1 + # Find leastsq fit and remove it for each piece + for m in range(len(bp) - 1): + Npts = bp[m + 1] - bp[m] + A = np.ones((Npts, 2), dtype) + A[:, 0] = np.arange(1, Npts + 1, dtype=dtype) / Npts + sl = slice(bp[m], bp[m + 1]) + coef, resids, rank, s = linalg.lstsq(A, newdata[sl]) + newdata[sl] = newdata[sl] - A @ coef + + # Put data back in original shape. + newdata = newdata.reshape(newdata_shape) + ret = np.moveaxis(newdata, 0, axis) + return ret + + +def lfilter_zi(b, a): + """ + Construct initial conditions for lfilter for step response steady-state. + + Compute an initial state `zi` for the `lfilter` function that corresponds + to the steady state of the step response. + + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + b, a : array_like (1-D) + The IIR filter coefficients. 
See `lfilter` for more + information. + + Returns + ------- + zi : 1-D ndarray + The initial state for the filter. + + See Also + -------- + lfilter, lfiltic, filtfilt + + Notes + ----- + A linear filter with order m has a state space representation (A, B, C, D), + for which the output y of the filter can be expressed as:: + + z(n+1) = A*z(n) + B*x(n) + y(n) = C*z(n) + D*x(n) + + where z(n) is a vector of length m, A has shape (m, m), B has shape + (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is + a scalar). lfilter_zi solves:: + + zi = A*zi + B + + In other words, it finds the initial condition for which the response + to an input of all ones is a constant. + + Given the filter coefficients `a` and `b`, the state space matrices + for the transposed direct form II implementation of the linear filter, + which is the implementation used by scipy.signal.lfilter, are:: + + A = scipy.linalg.companion(a).T + B = b[1:] - a[1:]*b[0] + + assuming ``a[0]`` is 1.0; if ``a[0]`` is not 1, `a` and `b` are first + divided by a[0]. + + Examples + -------- + The following code creates a lowpass Butterworth filter. Then it + applies that filter to an array whose values are all 1.0; the + output is also all 1.0, as expected for a lowpass filter. If the + `zi` argument of `lfilter` had not been given, the output would have + shown the transient signal. + + >>> from numpy import array, ones + >>> from scipy.signal import lfilter, lfilter_zi, butter + >>> b, a = butter(5, 0.25) + >>> zi = lfilter_zi(b, a) + >>> y, zo = lfilter(b, a, ones(10), zi=zi) + >>> y + array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) + + Another example: + + >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]) + >>> y, zf = lfilter(b, a, x, zi=zi*x[0]) + >>> y + array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528, + 0.44399389, 0.35505241]) + + Note that the `zi` argument to `lfilter` was computed using + `lfilter_zi` and scaled by ``x[0]``. Then the output `y` has no + transient until the input drops from 0.5 to 0.0. + + """ + + # FIXME: Can this function be replaced with an appropriate + # use of lfiltic? For example, when b,a = butter(N,Wn), + # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)). + # + + # We could use scipy.signal.normalize, but it uses warnings in + # cases where a ValueError is more appropriate, and it allows + # b to be 2D. + b = np.atleast_1d(b) + if b.ndim != 1: + raise ValueError("Numerator b must be 1-D.") + a = np.atleast_1d(a) + if a.ndim != 1: + raise ValueError("Denominator a must be 1-D.") + + while len(a) > 1 and a[0] == 0.0: + a = a[1:] + if a.size < 1: + raise ValueError("There must be at least one nonzero `a` coefficient.") + + if a[0] != 1.0: + # Normalize the coefficients so a[0] == 1. + b = b / a[0] + a = a / a[0] + + n = max(len(a), len(b)) + + # Pad a or b with zeros so they are the same length. 
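+    # (Illustrative note: for first-order coefficients a = [1, a1] and
+    # b = [b0, b1], the companion matrix used below is the 1x1 matrix [[-a1]],
+    # so the linear solve reduces to the closed form
+    # zi = (b1 - a1*b0) / (1 + a1), the steady-state filter state for a unit
+    # step input.)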
+ if len(a) < n: + a = np.r_[a, np.zeros(n - len(a), dtype=a.dtype)] + elif len(b) < n: + b = np.r_[b, np.zeros(n - len(b), dtype=b.dtype)] + + IminusA = np.eye(n - 1, dtype=np.result_type(a, b)) - linalg.companion(a).T + B = b[1:] - a[1:] * b[0] + # Solve zi = A*zi + B + zi = np.linalg.solve(IminusA, B) + + # For future reference: we could also use the following + # explicit formulas to solve the linear system: + # + # zi = np.zeros(n - 1) + # zi[0] = B.sum() / IminusA[:,0].sum() + # asum = 1.0 + # csum = 0.0 + # for k in range(1,n-1): + # asum += a[k] + # csum += b[k] - a[k]*b[0] + # zi[k] = asum*zi[0] - csum + + return zi + + +def sosfilt_zi(sos): + """ + Construct initial conditions for sosfilt for step response steady-state. + + Compute an initial state `zi` for the `sosfilt` function that corresponds + to the steady state of the step response. + + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + zi : ndarray + Initial conditions suitable for use with ``sosfilt``, shape + ``(n_sections, 2)``. + + See Also + -------- + sosfilt, zpk2sos + + Notes + ----- + .. versionadded:: 0.16.0 + + Examples + -------- + Filter a rectangular pulse that begins at time 0, with and without + the use of the `zi` argument of `scipy.signal.sosfilt`. + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sos = signal.butter(9, 0.125, output='sos') + >>> zi = signal.sosfilt_zi(sos) + >>> x = (np.arange(250) < 100).astype(int) + >>> f1 = signal.sosfilt(sos, x) + >>> f2, zo = signal.sosfilt(sos, x, zi=zi) + + >>> plt.plot(x, 'k--', label='x') + >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered') + >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + sos = np.asarray(sos) + if sos.ndim != 2 or sos.shape[1] != 6: + raise ValueError('sos must be shape (n_sections, 6)') + + if sos.dtype.kind in 'bui': + sos = sos.astype(np.float64) + + n_sections = sos.shape[0] + zi = np.empty((n_sections, 2), dtype=sos.dtype) + scale = 1.0 + for section in range(n_sections): + b = sos[section, :3] + a = sos[section, 3:] + zi[section] = scale * lfilter_zi(b, a) + # If H(z) = B(z)/A(z) is this section's transfer function, then + # b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady + # state value of this section's step response. + scale *= b.sum() / a.sum() + + return zi + + +def _filtfilt_gust(b, a, x, axis=-1, irlen=None): + """Forward-backward IIR filter that uses Gustafsson's method. + + Apply the IIR filter defined by ``(b,a)`` to `x` twice, first forward + then backward, using Gustafsson's initial conditions [1]_. + + Let ``y_fb`` be the result of filtering first forward and then backward, + and let ``y_bf`` be the result of filtering first backward then forward. + Gustafsson's method is to compute initial conditions for the forward + pass and the backward pass such that ``y_fb == y_bf``. + + Parameters + ---------- + b : scalar or 1-D ndarray + Numerator coefficients of the filter. + a : scalar or 1-D ndarray + Denominator coefficients of the filter. + x : ndarray + Data to be filtered. + axis : int, optional + Axis of `x` to be filtered. 
Default is -1. + irlen : int or None, optional + The length of the nonnegligible part of the impulse response. + If `irlen` is None, or if the length of the signal is less than + ``2 * irlen``, then no part of the impulse response is ignored. + + Returns + ------- + y : ndarray + The filtered data. + x0 : ndarray + Initial condition for the forward filter. + x1 : ndarray + Initial condition for the backward filter. + + Notes + ----- + Typically the return values `x0` and `x1` are not needed by the + caller. The intended use of these return values is in unit tests. + + References + ---------- + .. [1] F. Gustaffson. Determining the initial states in forward-backward + filtering. Transactions on Signal Processing, 46(4):988-992, 1996. + + """ + # In the comments, "Gustafsson's paper" and [1] refer to the + # paper referenced in the docstring. + + b = np.atleast_1d(b) + a = np.atleast_1d(a) + + order = max(len(b), len(a)) - 1 + if order == 0: + # The filter is just scalar multiplication, with no state. + scale = (b[0] / a[0])**2 + y = scale * x + return y, np.array([]), np.array([]) + + if axis != -1 or axis != x.ndim - 1: + # Move the axis containing the data to the end. + x = np.swapaxes(x, axis, x.ndim - 1) + + # n is the number of samples in the data to be filtered. + n = x.shape[-1] + + if irlen is None or n <= 2*irlen: + m = n + else: + m = irlen + + # Create Obs, the observability matrix (called O in the paper). + # This matrix can be interpreted as the operator that propagates + # an arbitrary initial state to the output, assuming the input is + # zero. + # In Gustafsson's paper, the forward and backward filters are not + # necessarily the same, so he has both O_f and O_b. We use the same + # filter in both directions, so we only need O. The same comment + # applies to S below. + Obs = np.zeros((m, order)) + zi = np.zeros(order) + zi[0] = 1 + Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0] + for k in range(1, order): + Obs[k:, k] = Obs[:-k, 0] + + # Obsr is O^R (Gustafsson's notation for row-reversed O) + Obsr = Obs[::-1] + + # Create S. S is the matrix that applies the filter to the reversed + # propagated initial conditions. That is, + # out = S.dot(zi) + # is the same as + # tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs. + # out = lfilter(b, a, tmp[::-1]) # Reverse and filter. + + # Equations (5) & (6) of [1] + S = lfilter(b, a, Obs[::-1], axis=0) + + # Sr is S^R (row-reversed S) + Sr = S[::-1] + + # M is [(S^R - O), (O^R - S)] + if m == n: + M = np.hstack((Sr - Obs, Obsr - S)) + else: + # Matrix described in section IV of [1]. + M = np.zeros((2*m, 2*order)) + M[:m, :order] = Sr - Obs + M[m:, order:] = Obsr - S + + # Naive forward-backward and backward-forward filters. + # These have large transients because the filters use zero initial + # conditions. + y_f = lfilter(b, a, x) + y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1] + + y_b = lfilter(b, a, x[..., ::-1])[..., ::-1] + y_bf = lfilter(b, a, y_b) + + delta_y_bf_fb = y_bf - y_fb + if m == n: + delta = delta_y_bf_fb + else: + start_m = delta_y_bf_fb[..., :m] + end_m = delta_y_bf_fb[..., -m:] + delta = np.concatenate((start_m, end_m), axis=-1) + + # ic_opt holds the "optimal" initial conditions. + # The following code computes the result shown in the formula + # of the paper between equations (6) and (7). + if delta.ndim == 1: + ic_opt = linalg.lstsq(M, delta)[0] + else: + # Reshape delta so it can be used as an array of multiple + # right-hand-sides in linalg.lstsq. 
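+        # For example, for `x` of shape (3, 5, n), `delta` has shape (3, 5, L)
+        # with L = n or 2*m, and `delta2d` below has shape (L, 15), so a
+        # single lstsq call solves all 15 right-hand sides at once.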
+ delta2d = delta.reshape(-1, delta.shape[-1]).T + ic_opt0 = linalg.lstsq(M, delta2d)[0].T + ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],)) + + # Now compute the filtered signal using equation (7) of [1]. + # First, form [S^R, O^R] and call it W. + if m == n: + W = np.hstack((Sr, Obsr)) + else: + W = np.zeros((2*m, 2*order)) + W[:m, :order] = Sr + W[m:, order:] = Obsr + + # Equation (7) of [1] says + # Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt] + # `wic` is (almost) the product on the right. + # W has shape (m, 2*order), and ic_opt has shape (..., 2*order), + # so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T, + # so wic has shape (..., m). + wic = ic_opt.dot(W.T) + + # `wic` is "almost" the product of W and the optimal ICs in equation + # (7)--if we're using a truncated impulse response (m < n), `wic` + # contains only the adjustments required for the ends of the signal. + # Here we form y_opt, taking this into account if necessary. + y_opt = y_fb + if m == n: + y_opt += wic + else: + y_opt[..., :m] += wic[..., :m] + y_opt[..., -m:] += wic[..., -m:] + + x0 = ic_opt[..., :order] + x1 = ic_opt[..., -order:] + if axis != -1 or axis != x.ndim - 1: + # Restore the data axis to its original position. + x0 = np.swapaxes(x0, axis, x.ndim - 1) + x1 = np.swapaxes(x1, axis, x.ndim - 1) + y_opt = np.swapaxes(y_opt, axis, x.ndim - 1) + + return y_opt, x0, x1 + + +def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad', + irlen=None): + """ + Apply a digital filter forward and backward to a signal. + + This function applies a linear digital filter twice, once forward and + once backwards. The combined filter has zero phase and a filter order + twice that of the original. + + The function provides options for handling the edges of the signal. + + The function `sosfiltfilt` (and filter design using ``output='sos'``) + should be preferred over `filtfilt` for most filtering tasks, as + second-order sections have fewer numerical problems. + + Parameters + ---------- + b : (N,) array_like + The numerator coefficient vector of the filter. + a : (N,) array_like + The denominator coefficient vector of the filter. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. + x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. + padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. + padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. + The default value is ``3 * max(len(a), len(b))``. + method : str, optional + Determines the method for handling the edges of the signal, either + "pad" or "gust". When `method` is "pad", the signal is padded; the + type of padding is determined by `padtype` and `padlen`, and `irlen` + is ignored. When `method` is "gust", Gustafsson's method is used, + and `padtype` and `padlen` are ignored. + irlen : int or None, optional + When `method` is "gust", `irlen` specifies the length of the + impulse response of the filter. If `irlen` is None, no part + of the impulse response is ignored. For a long signal, specifying + `irlen` can significantly improve the performance of the filter. 
+ + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt + + Notes + ----- + When `method` is "pad", the function pads the data along the given axis + in one of three ways: odd, even or constant. The odd and even extensions + have the corresponding symmetry about the end point of the data. The + constant extension extends the data with the values at the end points. On + both the forward and backward passes, the initial condition of the + filter is found by using `lfilter_zi` and scaling it by the end point of + the extended data. + + When `method` is "gust", Gustafsson's method [1]_ is used. Initial + conditions are chosen for the forward and backward passes so that the + forward-backward filter gives the same result as the backward-forward + filter. + + The option to use Gustaffson's method was added in scipy version 0.16.0. + + References + ---------- + .. [1] F. Gustaffson, "Determining the initial states in forward-backward + filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992, + 1996. + + Examples + -------- + The examples will use several functions from `scipy.signal`. + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + First we create a one second signal that is the sum of two pure sine + waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz. + + >>> t = np.linspace(0, 1.0, 2001) + >>> xlow = np.sin(2 * np.pi * 5 * t) + >>> xhigh = np.sin(2 * np.pi * 250 * t) + >>> x = xlow + xhigh + + Now create a lowpass Butterworth filter with a cutoff of 0.125 times + the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`. + The result should be approximately ``xlow``, with no phase shift. + + >>> b, a = signal.butter(8, 0.125) + >>> y = signal.filtfilt(b, a, x, padlen=150) + >>> np.abs(y - xlow).max() + 9.1086182074789912e-06 + + We get a fairly clean result for this artificial example because + the odd extension is exact, and with the moderately long padding, + the filter's transients have dissipated by the time the actual data + is reached. In general, transient effects at the edges are + unavoidable. + + The following example demonstrates the option ``method="gust"``. + + First, create a filter. + + >>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied. + + `sig` is a random input signal to be filtered. + + >>> rng = np.random.default_rng() + >>> n = 60 + >>> sig = rng.standard_normal(n)**3 + 3*rng.standard_normal(n).cumsum() + + Apply `filtfilt` to `sig`, once using the Gustafsson method, and + once using padding, and plot the results for comparison. + + >>> fgust = signal.filtfilt(b, a, sig, method="gust") + >>> fpad = signal.filtfilt(b, a, sig, padlen=50) + >>> plt.plot(sig, 'k-', label='input') + >>> plt.plot(fgust, 'b-', linewidth=4, label='gust') + >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad') + >>> plt.legend(loc='best') + >>> plt.show() + + The `irlen` argument can be used to improve the performance + of Gustafsson's method. + + Estimate the impulse response length of the filter. + + >>> z, p, k = signal.tf2zpk(b, a) + >>> eps = 1e-9 + >>> r = np.max(np.abs(p)) + >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) + >>> approx_impulse_len + 137 + + Apply the filter to a longer signal, with and without the `irlen` + argument. The difference between `y1` and `y2` is small. For long + signals, using `irlen` gives a significant performance improvement. 
+ + >>> x = rng.standard_normal(4000) + >>> y1 = signal.filtfilt(b, a, x, method='gust') + >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len) + >>> print(np.max(np.abs(y1 - y2))) + 2.875334415008979e-10 + + """ + b = np.atleast_1d(b) + a = np.atleast_1d(a) + x = np.asarray(x) + + if method not in ["pad", "gust"]: + raise ValueError("method must be 'pad' or 'gust'.") + + if method == "gust": + y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) + return y + + # method == "pad" + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=max(len(a), len(b))) + + # Get the steady state of the filter's step response. + zi = lfilter_zi(b, a) + + # Reshape zi and create x0 so that zi*x0 broadcasts + # to the correct value for the 'zi' keyword argument + # to lfilter. + zi_shape = [1] * x.ndim + zi_shape[axis] = zi.size + zi = np.reshape(zi, zi_shape) + x0 = axis_slice(ext, stop=1, axis=axis) + + # Forward filter. + (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0) + + # Backward filter. + # Create y0 so zi*y0 broadcasts appropriately. + y0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0) + + # Reverse y. + y = axis_reverse(y, axis=axis) + + if edge > 0: + # Slice the actual signal from the extended signal. + y = axis_slice(y, start=edge, stop=-edge, axis=axis) + + return y + + +def _validate_pad(padtype, padlen, x, axis, ntaps): + """Helper to validate padding for filtfilt""" + if padtype not in ['even', 'odd', 'constant', None]: + raise ValueError(f"Unknown value '{padtype}' given to padtype. " + "padtype must be 'even', 'odd', 'constant', or None.") + + if padtype is None: + padlen = 0 + + if padlen is None: + # Original padding; preserved for backwards compatibility. + edge = ntaps * 3 + else: + edge = padlen + + # x's 'axis' dimension must be bigger than edge. + if x.shape[axis] <= edge: + raise ValueError("The length of the input vector x must be greater " + "than padlen, which is %d." % edge) + + if padtype is not None and edge > 0: + # Make an extension of length `edge` at each + # end of the input array. + if padtype == 'even': + ext = even_ext(x, edge, axis=axis) + elif padtype == 'odd': + ext = odd_ext(x, edge, axis=axis) + else: + ext = const_ext(x, edge, axis=axis) + else: + ext = x + return edge, ext + + +def _validate_x(x): + x = np.asarray(x) + if x.ndim == 0: + raise ValueError('x must be at least 1-D') + return x + + +def sosfilt(sos, x, axis=-1, zi=None): + """ + Filter data along one dimension using cascaded second-order sections. + + Filter a data sequence, `x`, using a digital IIR filter defined by + `sos`. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the cascaded filter delays. It is a (at + least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where + ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]`` + replaced by 2. If `zi` is None or is not given then initial rest + (i.e. all zeros) is assumed. 
+ Note that these initial conditions are *not* the same as the initial + conditions given by `lfiltic` or `lfilter_zi`. + + Returns + ------- + y : ndarray + The output of the digital filter. + zf : ndarray, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, freqz_sos + + Notes + ----- + The filter function is implemented as a series of second-order filters + with direct-form II transposed structure. It is designed to minimize + numerical precision errors for high-order filters. + + .. versionadded:: 0.16.0 + + Examples + -------- + Plot a 13th-order filter's impulse response using both `lfilter` and + `sosfilt`, showing the instability that results from trying to do a + 13th-order filter in a single stage (the numerical error pushes some poles + outside of the unit circle): + + >>> import matplotlib.pyplot as plt + >>> from scipy import signal + >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba') + >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos') + >>> x = signal.unit_impulse(700) + >>> y_tf = signal.lfilter(b, a, x) + >>> y_sos = signal.sosfilt(sos, x) + >>> plt.plot(y_tf, 'r', label='TF') + >>> plt.plot(y_sos, 'k', label='SOS') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + _reject_objects(sos, 'sosfilt') + _reject_objects(x, 'sosfilt') + if zi is not None: + _reject_objects(zi, 'sosfilt') + + x = _validate_x(x) + sos, n_sections = _validate_sos(sos) + x_zi_shape = list(x.shape) + x_zi_shape[axis] = 2 + x_zi_shape = tuple([n_sections] + x_zi_shape) + inputs = [sos, x] + if zi is not None: + inputs.append(np.asarray(zi)) + dtype = np.result_type(*inputs) + if dtype.char not in 'fdgFDGO': + raise NotImplementedError(f"input type '{dtype}' not supported") + if zi is not None: + zi = np.array(zi, dtype) # make a copy so that we can operate in place + if zi.shape != x_zi_shape: + raise ValueError('Invalid zi shape. With axis=%r, an input with ' + 'shape %r, and an sos array with %d sections, zi ' + 'must have shape %r, got %r.' % + (axis, x.shape, n_sections, x_zi_shape, zi.shape)) + return_zi = True + else: + zi = np.zeros(x_zi_shape, dtype=dtype) + return_zi = False + axis = axis % x.ndim # make positive + x = np.moveaxis(x, axis, -1) + zi = np.moveaxis(zi, [0, axis + 1], [-2, -1]) + x_shape, zi_shape = x.shape, zi.shape + x = np.reshape(x, (-1, x.shape[-1])) + x = np.array(x, dtype, order='C') # make a copy, can modify in place + zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2))) + sos = sos.astype(dtype, copy=False) + _sosfilt(sos, x, zi) + x.shape = x_shape + x = np.moveaxis(x, -1, axis) + if return_zi: + zi.shape = zi_shape + zi = np.moveaxis(zi, [-2, -1], [0, axis + 1]) + out = (x, zi) + else: + out = x + return out + + +def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None): + """ + A forward-backward digital filter using cascaded second-order sections. + + See `filtfilt` for more complete information about this method. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. 
+ padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. + padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. + The default value is:: + + 3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(), + (sos[:, 5] == 0).sum())) + + The extra subtraction at the end attempts to compensate for poles + and zeros at the origin (e.g. for odd-order filters) to yield + equivalent estimates of `padlen` to those of `filtfilt` for + second-order section filters built with `scipy.signal` functions. + + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + filtfilt, sosfilt, sosfilt_zi, freqz_sos + + Notes + ----- + .. versionadded:: 0.18.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import sosfiltfilt, butter + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Create an interesting signal to filter. + + >>> n = 201 + >>> t = np.linspace(0, 1, n) + >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*rng.standard_normal(n) + + Create a lowpass Butterworth filter, and use it to filter `x`. + + >>> sos = butter(4, 0.125, output='sos') + >>> y = sosfiltfilt(sos, x) + + For comparison, apply an 8th order filter using `sosfilt`. The filter + is initialized using the mean of the first four values of `x`. + + >>> from scipy.signal import sosfilt, sosfilt_zi + >>> sos8 = butter(8, 0.125, output='sos') + >>> zi = x[:4].mean() * sosfilt_zi(sos8) + >>> y2, zo = sosfilt(sos8, x, zi=zi) + + Plot the results. Note that the phase of `y` matches the input, while + `y2` has a significant phase delay. + + >>> plt.plot(t, x, alpha=0.5, label='x(t)') + >>> plt.plot(t, y, label='y(t)') + >>> plt.plot(t, y2, label='y2(t)') + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.grid(alpha=0.25) + >>> plt.xlabel('t') + >>> plt.show() + + """ + sos, n_sections = _validate_sos(sos) + x = _validate_x(x) + + # `method` is "pad"... + ntaps = 2 * n_sections + 1 + ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum()) + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=ntaps) + + # These steps follow the same form as filtfilt with modifications + zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...) + zi_shape = [1] * x.ndim + zi_shape[axis] = 2 + zi.shape = [n_sections] + zi_shape + x_0 = axis_slice(ext, stop=1, axis=axis) + (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0) + y_0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0) + y = axis_reverse(y, axis=axis) + if edge > 0: + y = axis_slice(y, start=edge, stop=-edge, axis=axis) + return y + + +def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True): + """ + Downsample the signal after applying an anti-aliasing filter. + + By default, an order 8 Chebyshev type I filter is used. A 30 point FIR + filter with Hamming window is used if `ftype` is 'fir'. + + Parameters + ---------- + x : array_like + The signal to be downsampled, as an N-dimensional array. + q : int + The downsampling factor. When using IIR downsampling, it is recommended + to call `decimate` multiple times for downsampling factors higher than + 13. 
+ n : int, optional + The order of the filter (1 less than the length for 'fir'). Defaults to + 8 for 'iir' and 20 times the downsampling factor for 'fir'. + ftype : str {'iir', 'fir'} or ``dlti`` instance, optional + If 'iir' or 'fir', specifies the type of lowpass filter. If an instance + of an `dlti` object, uses that object to filter before downsampling. + axis : int, optional + The axis along which to decimate. + zero_phase : bool, optional + Prevent phase shift by filtering with `filtfilt` instead of `lfilter` + when using an IIR filter, and shifting the outputs back by the filter's + group delay when using an FIR filter. The default value of ``True`` is + recommended, since a phase shift is generally not desired. + + .. versionadded:: 0.18.0 + + Returns + ------- + y : ndarray + The down-sampled signal. + + See Also + -------- + resample : Resample up or down using the FFT method. + resample_poly : Resample using polyphase filtering and an FIR filter. + + Notes + ----- + The ``zero_phase`` keyword was added in 0.18.0. + The possibility to use instances of ``dlti`` as ``ftype`` was added in + 0.18.0. + + Examples + -------- + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Define wave parameters. + + >>> wave_duration = 3 + >>> sample_rate = 100 + >>> freq = 2 + >>> q = 5 + + Calculate number of samples. + + >>> samples = wave_duration*sample_rate + >>> samples_decimated = int(samples/q) + + Create cosine wave. + + >>> x = np.linspace(0, wave_duration, samples, endpoint=False) + >>> y = np.cos(x*np.pi*freq*2) + + Decimate cosine wave. + + >>> ydem = signal.decimate(y, q) + >>> xnew = np.linspace(0, wave_duration, samples_decimated, endpoint=False) + + Plot original and decimated waves. + + >>> plt.plot(x, y, '.-', xnew, ydem, 'o-') + >>> plt.xlabel('Time, Seconds') + >>> plt.legend(['data', 'decimated'], loc='best') + >>> plt.show() + + """ + + x = np.asarray(x) + q = operator.index(q) + + if n is not None: + n = operator.index(n) + + result_type = x.dtype + if not np.issubdtype(result_type, np.inexact) \ + or result_type.type == np.float16: + # upcast integers and float16 to float64 + result_type = np.float64 + + if ftype == 'fir': + if n is None: + half_len = 10 * q # reasonable cutoff for our sinc-like function + n = 2 * half_len + b, a = firwin(n+1, 1. / q, window='hamming'), 1. 
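+        # firwin above designs a length-(n+1), Hamming-windowed, linear-phase
+        # lowpass FIR with cutoff at 1/q of the Nyquist frequency; the
+        # denominator is the scalar 1.0 because the filter is FIR.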
+        b = np.asarray(b, dtype=result_type)
+        a = np.asarray(a, dtype=result_type)
+    elif ftype == 'iir':
+        iir_use_sos = True
+        if n is None:
+            n = 8
+        # Order-n (default 8) Chebyshev type I lowpass with 0.05 dB passband
+        # ripple and cutoff at 0.8/q of the Nyquist frequency, designed as
+        # second-order sections.
+        sos = cheby1(n, 0.05, 0.8 / q, output='sos')
+        sos = np.asarray(sos, dtype=result_type)
+    elif isinstance(ftype, dlti):
+        system = ftype._as_zpk()
+        if system.poles.shape[0] == 0:
+            # FIR
+            system = ftype._as_tf()
+            b, a = system.num, system.den
+            ftype = 'fir'
+        elif (any(np.iscomplex(system.poles))
+              or any(np.iscomplex(system.zeros))
+              or np.iscomplex(system.gain)):
+            # sosfilt & sosfiltfilt don't handle complex coeffs
+            iir_use_sos = False
+            system = ftype._as_tf()
+            b, a = system.num, system.den
+        else:
+            iir_use_sos = True
+            sos = zpk2sos(system.zeros, system.poles, system.gain)
+            sos = np.asarray(sos, dtype=result_type)
+    else:
+        raise ValueError('invalid ftype')
+
+    sl = [slice(None)] * x.ndim
+
+    if ftype == 'fir':
+        b = b / a
+        if zero_phase:
+            y = resample_poly(x, 1, q, axis=axis, window=b)
+        else:
+            # upfirdn is generally faster than lfilter by a factor equal to the
+            # downsampling factor, since it only calculates the needed outputs
+            n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
+            y = upfirdn(b, x, up=1, down=q, axis=axis)
+            sl[axis] = slice(None, n_out, None)
+
+    else:  # IIR case
+        if zero_phase:
+            if iir_use_sos:
+                y = sosfiltfilt(sos, x, axis=axis)
+            else:
+                y = filtfilt(b, a, x, axis=axis)
+        else:
+            if iir_use_sos:
+                y = sosfilt(sos, x, axis=axis)
+            else:
+                y = lfilter(b, a, x, axis=axis)
+
+        sl[axis] = slice(None, None, q)
+
+    return y[tuple(sl)]
diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..9697a8a7dda74e71dadf9522d0ba943d2946a581
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spectral_py.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spectral_py.py
new file mode 100644
index 0000000000000000000000000000000000000000..5151b2b335172a485b5d13408d35290812cadc43
--- /dev/null
+++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spectral_py.py
@@ -0,0 +1,2291 @@
+"""Tools for spectral analysis.
+"""
+import numpy as np
+import numpy.typing as npt
+from scipy import fft as sp_fft
+from . import _signaltools
+from .windows import get_window
+from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
+import warnings
+from typing import Literal
+
+
+__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
+           'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA']
+
+
+def lombscargle(
+    x: npt.ArrayLike,
+    y: npt.ArrayLike,
+    freqs: npt.ArrayLike,
+    precenter: bool = False,
+    normalize: bool | Literal["power", "normalize", "amplitude"] = False,
+    *,
+    weights: npt.NDArray | None = None,
+    floating_mean: bool = False,
+) -> npt.NDArray:
+    """
+    Compute the generalized Lomb-Scargle periodogram.
+
+    The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
+    extended by Scargle [2]_ to find, and test the significance of, weak
+    periodic signals with uneven temporal sampling. The algorithm used
+    here is based on a weighted least-squares fit of the form
+    ``y(ω) = a*cos(ω*x) + b*sin(ω*x) + c``, where the fit is calculated for
+    each frequency independently.
This algorithm was developed by Zechmeister + and Kürster which improves the Lomb-Scargle periodogram by enabling + the weighting of individual samples and calculating an unknown y offset + (also called a "floating-mean" model) [3]_. For more details, and practical + considerations, see the excellent reference on the Lomb-Scargle periodogram [4]_. + + When *normalize* is False (or "power") (default) the computed periodogram + is unnormalized, it takes the value ``(A**2) * N/4`` for a harmonic + signal with amplitude A for sufficiently large N. Where N is the length of x or y. + + When *normalize* is True (or "normalize") the computed periodogram is normalized + by the residuals of the data around a constant reference model (at zero). + + When *normalize* is "amplitude" the computed periodogram is the complex + representation of the amplitude and phase. + + Input arrays should be 1-D of a real floating data type, which are converted into + float64 arrays before processing. + + Parameters + ---------- + x : array_like + Sample times. + y : array_like + Measurement values. Values are assumed to have a baseline of ``y = 0``. If + there is a possibility of a y offset, it is recommended to set `floating_mean` + to True. + freqs : array_like + Angular frequencies (e.g., having unit rad/s=2π/s for `x` having unit s) for + output periodogram. Frequencies are normally >= 0, as any peak at ``-freq`` will + also exist at ``+freq``. + precenter : bool, optional + Pre-center measurement values by subtracting the mean, if True. This is + a legacy parameter and unnecessary if `floating_mean` is True. + normalize : bool | str, optional + Compute normalized or complex (amplitude + phase) periodogram. + Valid options are: ``False``/``"power"``, ``True``/``"normalize"``, or + ``"amplitude"``. + weights : array_like, optional + Weights for each sample. Weights must be nonnegative. + floating_mean : bool, optional + Determines a y offset for each frequency independently, if True. + Else the y offset is assumed to be `0`. + + Returns + ------- + pgram : array_like + Lomb-Scargle periodogram. + + Raises + ------ + ValueError + If any of the input arrays x, y, freqs, or weights are not 1D, or if any are + zero length. Or, if the input arrays x, y, and weights do not have the same + shape as each other. + ValueError + If any weight is < 0, or the sum of the weights is <= 0. + ValueError + If the normalize parameter is not one of the allowed options. + + See Also + -------- + periodogram: Power spectral density using a periodogram + welch: Power spectral density by Welch's method + csd: Cross spectral density by Welch's method + + Notes + ----- + The algorithm used will not automatically account for any unknown y offset, unless + floating_mean is True. Therefore, for most use cases, if there is a possibility of + a y offset, it is recommended to set floating_mean to True. If precenter is True, + it performs the operation ``y -= y.mean()``. However, precenter is a legacy + parameter, and unnecessary when floating_mean is True. Furthermore, the mean + removed by precenter does not account for sample weights, nor will it correct for + any bias due to consistently missing observations at peaks and/or troughs. When the + normalize parameter is "amplitude", for any frequency in freqs that is below + ``(2*pi)/(x.max() - x.min())``, the predicted amplitude will tend towards infinity. + The concept of a "Nyquist frequency" limit (see Nyquist-Shannon sampling theorem) + is not generally applicable to unevenly sampled data. 
Therefore, with unevenly + sampled data, valid frequencies in freqs can often be much higher than expected. + + References + ---------- + .. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced + data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976 + :doi:`10.1007/bf00648343` + + .. [2] J.D. Scargle "Studies in astronomical time series analysis. II - + Statistical aspects of spectral analysis of unevenly spaced data", + The Astrophysical Journal, vol 263, pp. 835-853, 1982 + :doi:`10.1086/160554` + + .. [3] M. Zechmeister and M. Kürster, "The generalised Lomb-Scargle periodogram. + A new formalism for the floating-mean and Keplerian periodograms," + Astronomy and Astrophysics, vol. 496, pp. 577-584, 2009 + :doi:`10.1051/0004-6361:200811296` + + .. [4] J.T. VanderPlas, "Understanding the Lomb-Scargle Periodogram," + The Astrophysical Journal Supplement Series, vol. 236, no. 1, p. 16, + May 2018 + :doi:`10.3847/1538-4365/aab766` + + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + + First define some input parameters for the signal: + + >>> A = 2. # amplitude + >>> c = 2. # offset + >>> w0 = 1. # rad/sec + >>> nin = 150 + >>> nout = 1002 + + Randomly generate sample times: + + >>> x = rng.uniform(0, 10*np.pi, nin) + + Plot a sine wave for the selected times: + + >>> y = A * np.cos(w0*x) + c + + Define the array of frequencies for which to compute the periodogram: + + >>> w = np.linspace(0.25, 10, nout) + + Calculate Lomb-Scargle periodogram for each of the normalize options: + + >>> from scipy.signal import lombscargle + >>> pgram_power = lombscargle(x, y, w, normalize=False) + >>> pgram_norm = lombscargle(x, y, w, normalize=True) + >>> pgram_amp = lombscargle(x, y, w, normalize='amplitude') + ... + >>> pgram_power_f = lombscargle(x, y, w, normalize=False, floating_mean=True) + >>> pgram_norm_f = lombscargle(x, y, w, normalize=True, floating_mean=True) + >>> pgram_amp_f = lombscargle(x, y, w, normalize='amplitude', floating_mean=True) + + Now make a plot of the input data: + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_t, ax_p, ax_n, ax_a) = plt.subplots(4, 1, figsize=(5, 6)) + >>> ax_t.plot(x, y, 'b+') + >>> ax_t.set_xlabel('Time [s]') + >>> ax_t.set_ylabel('Amplitude') + + Then plot the periodogram for each of the normalize options, as well as with and + without floating_mean=True: + + >>> ax_p.plot(w, pgram_power, label='default') + >>> ax_p.plot(w, pgram_power_f, label='floating_mean=True') + >>> ax_p.set_xlabel('Angular frequency [rad/s]') + >>> ax_p.set_ylabel('Power') + >>> ax_p.legend(prop={'size': 7}) + ... + >>> ax_n.plot(w, pgram_norm, label='default') + >>> ax_n.plot(w, pgram_norm_f, label='floating_mean=True') + >>> ax_n.set_xlabel('Angular frequency [rad/s]') + >>> ax_n.set_ylabel('Normalized') + >>> ax_n.legend(prop={'size': 7}) + ... + >>> ax_a.plot(w, np.abs(pgram_amp), label='default') + >>> ax_a.plot(w, np.abs(pgram_amp_f), label='floating_mean=True') + >>> ax_a.set_xlabel('Angular frequency [rad/s]') + >>> ax_a.set_ylabel('Amplitude') + >>> ax_a.legend(prop={'size': 7}) + ... 
+ >>> plt.tight_layout() + >>> plt.show() + + """ + + # if no weights are provided, assume all data points are equally important + if weights is None: + weights = np.ones_like(y, dtype=np.float64) + else: + # if provided, make sure weights is an array and cast to float64 + weights = np.asarray(weights, dtype=np.float64) + + # make sure other inputs are arrays and cast to float64 + # done before validation, in case they were not arrays + x = np.asarray(x, dtype=np.float64) + y = np.asarray(y, dtype=np.float64) + freqs = np.asarray(freqs, dtype=np.float64) + + # validate input shapes + if not (x.ndim == 1 and x.size > 0 and x.shape == y.shape == weights.shape): + raise ValueError("Parameters x, y, weights must be 1-D arrays of " + "equal non-zero length!") + if not (freqs.ndim == 1 and freqs.size > 0): + raise ValueError("Parameter freqs must be a 1-D array of non-zero length!") + + # validate weights + if not (np.all(weights >= 0) and np.sum(weights) > 0): + raise ValueError("Parameter weights must have only non-negative entries " + "which sum to a positive value!") + + # validate normalize parameter + if isinstance(normalize, bool): + # if bool, convert to str literal + normalize = "normalize" if normalize else "power" + + if normalize not in ["power", "normalize", "amplitude"]: + raise ValueError( + "Normalize must be: False (or 'power'), True (or 'normalize'), " + "or 'amplitude'." + ) + + # weight vector must sum to 1 + weights *= 1.0 / weights.sum() + + # if requested, perform precenter + if precenter: + y -= y.mean() + + # transform arrays + # row vector + freqs = freqs.reshape(1, -1) + # column vectors + x = x.reshape(-1, 1) + y = y.reshape(-1, 1) + weights = weights.reshape(-1, 1) + + # store frequent intermediates + weights_y = weights * y + freqst = freqs * x + coswt = np.cos(freqst) + sinwt = np.sin(freqst) + + Y = np.dot(weights.T, y) # Eq. 7 + CC = np.dot(weights.T, coswt * coswt) # Eq. 13 + SS = 1.0 - CC # trig identity: S^2 = 1 - C^2 Eq.14 + CS = np.dot(weights.T, coswt * sinwt) # Eq. 15 + + if floating_mean: + C = np.dot(weights.T, coswt) # Eq. 8 + S = np.dot(weights.T, sinwt) # Eq. 9 + CC -= C * C # Eq. 13 + SS -= S * S # Eq. 14 + CS -= C * S # Eq. 15 + + # calculate tau (phase offset to eliminate CS variable) + tau = 0.5 * np.arctan2(2.0 * CS, CC - SS) # Eq. 19 + freqst_tau = freqst - tau + + # coswt and sinwt are now offset by tau, which eliminates CS + coswt_tau = np.cos(freqst_tau) + sinwt_tau = np.sin(freqst_tau) + + YC = np.dot(weights_y.T, coswt_tau) # Eq. 11 + YS = np.dot(weights_y.T, sinwt_tau) # Eq. 12 + CC = np.dot(weights.T, coswt_tau * coswt_tau) # Eq. 13, CC range is [0.5, 1.0] + SS = 1.0 - CC # trig identity: S^2 = 1 - C^2 Eq. 14, SS range is [0.0, 0.5] + + if floating_mean: + C = np.dot(weights.T, coswt_tau) # Eq. 8 + S = np.dot(weights.T, sinwt_tau) # Eq. 9 + YC -= Y * C # Eq. 11 + YS -= Y * S # Eq. 12 + CC -= C * C # Eq. 13, CC range is now [0.0, 1.0] + SS -= S * S # Eq. 14, SS range is now [0.0, 0.5] + + # to prevent division by zero errors with a and b, as well as correcting for + # numerical precision errors that lead to CC or SS being approximately -0.0, + # make sure CC and SS are both > 0 + epsneg = np.finfo(dtype=y.dtype).epsneg + CC[CC < epsneg] = epsneg + SS[SS < epsneg] = epsneg + + # calculate a and b + # where: y(w) = a*cos(w) + b*sin(w) + c + a = YC / CC # Eq. A.4 and 6, eliminating CS + b = YS / SS # Eq. 
A.4 and 6, eliminating CS + # c = Y - a * C - b * S + + # store final value as power in A^2 (i.e., (y units)^2) + pgram = 2.0 * (a * YC + b * YS) + + # squeeze back to a vector + pgram = np.squeeze(pgram) + + if normalize == "power": # (default) + # return the legacy power units ((A**2) * N/4) + + pgram *= float(x.shape[0]) / 4.0 + + elif normalize == "normalize": + # return the normalized power (power at current frequency wrt the entire signal) + # range will be [0, 1] + + YY = np.dot(weights_y.T, y) # Eq. 10 + if floating_mean: + YY -= Y * Y # Eq. 10 + + pgram *= 0.5 / np.squeeze(YY) # Eq. 20 + + else: # normalize == "amplitude": + # return the complex representation of the best-fit amplitude and phase + + # squeeze back to vectors + a = np.squeeze(a) + b = np.squeeze(b) + tau = np.squeeze(tau) + + # calculate the complex representation, and correct for tau rotation + pgram = (a + 1j * b) * np.exp(1j * tau) + + return pgram + + +def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant', + return_onesided=True, scaling='density', axis=-1): + """ + Estimate power spectral density using a periodogram. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be equal to the length + of the axis over which the periodogram is computed. Defaults + to 'boxcar'. + nfft : int, optional + Length of the FFT used. If `None` the length of `x` will be + used. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Pxx` has units of V**2/Hz and computing the squared magnitude + spectrum ('spectrum') where `Pxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density' + axis : int, optional + Axis along which the periodogram is computed; the default is + over the last axis (i.e. ``axis=-1``). + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxx : ndarray + Power spectral density or power spectrum of `x`. + + See Also + -------- + welch: Estimate power spectral density using Welch's method + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Notes + ----- + Consult the :ref:`tutorial_SpectralAnalysis` section of the :ref:`user_guide` + for a discussion of the scalings of the power spectral density and + the magnitude (squared) spectrum. + + .. 
versionadded:: 0.12.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by + 0.001 V**2/Hz of white noise sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2*np.sqrt(2) + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> x = amp*np.sin(2*np.pi*freq*time) + >>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape) + + Compute and plot the power spectral density. + + >>> f, Pxx_den = signal.periodogram(x, fs) + >>> plt.semilogy(f, Pxx_den) + >>> plt.ylim([1e-7, 1e2]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('PSD [V**2/Hz]') + >>> plt.show() + + If we average the last half of the spectral density, to exclude the + peak, we can recover the noise power on the signal. + + >>> np.mean(Pxx_den[25000:]) + 0.000985320699252543 + + Now compute and plot the power spectrum. + + >>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum') + >>> plt.figure() + >>> plt.semilogy(f, np.sqrt(Pxx_spec)) + >>> plt.ylim([1e-4, 1e1]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Linear spectrum [V RMS]') + >>> plt.show() + + The peak height in the power spectrum is an estimate of the RMS + amplitude. + + >>> np.sqrt(Pxx_spec.max()) + 2.0077340678640727 + + """ + x = np.asarray(x) + + if x.size == 0: + return np.empty(x.shape), np.empty(x.shape) + + if window is None: + window = 'boxcar' + + if nfft is None: + nperseg = x.shape[axis] + elif nfft == x.shape[axis]: + nperseg = nfft + elif nfft > x.shape[axis]: + nperseg = x.shape[axis] + elif nfft < x.shape[axis]: + s = [np.s_[:]]*len(x.shape) + s[axis] = np.s_[:nfft] + x = x[tuple(s)] + nperseg = nfft + nfft = None + + if hasattr(window, 'size'): + if window.size != nperseg: + raise ValueError('the size of the window must be the same size ' + 'of the input on the specified axis') + + return welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=0, + nfft=nfft, detrend=detrend, return_onesided=return_onesided, + scaling=scaling, axis=axis) + + +def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, + detrend='constant', return_onesided=True, scaling='density', + axis=-1, average='mean'): + r""" + Estimate power spectral density using Welch's method. + + Welch's method [1]_ computes an estimate of the power spectral + density by dividing the data into overlapping segments, computing a + modified periodogram for each segment and averaging the + periodograms. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. 
+ nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Pxx` has units of V**2/Hz and computing the squared magnitude + spectrum ('spectrum') where `Pxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density' + axis : int, optional + Axis along which the periodogram is computed; the default is + over the last axis (i.e. ``axis=-1``). + average : { 'mean', 'median' }, optional + Method to use when averaging periodograms. Defaults to 'mean'. + + .. versionadded:: 1.2.0 + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxx : ndarray + Power spectral density or power spectrum of x. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + If `noverlap` is 0, this method is equivalent to Bartlett's method + [2]_. + + Consult the :ref:`tutorial_SpectralAnalysis` section of the :ref:`user_guide` + for a discussion of the scalings of the power spectral density and + the (squared) magnitude spectrum. + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika, vol. 37, pp. 1-16, 1950. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by + 0.001 V**2/Hz of white noise sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2*np.sqrt(2) + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> x = amp*np.sin(2*np.pi*freq*time) + >>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape) + + Compute and plot the power spectral density. + + >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024) + >>> plt.semilogy(f, Pxx_den) + >>> plt.ylim([0.5e-3, 1]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('PSD [V**2/Hz]') + >>> plt.show() + + If we average the last half of the spectral density, to exclude the + peak, we can recover the noise power on the signal. + + >>> np.mean(Pxx_den[256:]) + 0.0009924865443739191 + + Now compute and plot the power spectrum. 
+ + >>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum') + >>> plt.figure() + >>> plt.semilogy(f, np.sqrt(Pxx_spec)) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Linear spectrum [V RMS]') + >>> plt.show() + + The peak height in the power spectrum is an estimate of the RMS + amplitude. + + >>> np.sqrt(Pxx_spec.max()) + 2.0077340678640727 + + If we now introduce a discontinuity in the signal, by increasing the + amplitude of a small portion of the signal by 50, we can see the + corruption of the mean average power spectral density, but using a + median average better estimates the normal behaviour. + + >>> x[int(N//2):int(N//2)+10] *= 50. + >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024) + >>> f_med, Pxx_den_med = signal.welch(x, fs, nperseg=1024, average='median') + >>> plt.semilogy(f, Pxx_den, label='mean') + >>> plt.semilogy(f_med, Pxx_den_med, label='median') + >>> plt.ylim([0.5e-3, 1]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('PSD [V**2/Hz]') + >>> plt.legend() + >>> plt.show() + + """ + freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, + return_onesided=return_onesided, scaling=scaling, + axis=axis, average=average) + + return freqs, Pxx.real + + +def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, + detrend='constant', return_onesided=True, scaling='density', + axis=-1, average='mean'): + r""" + Estimate the cross power spectral density, Pxy, using Welch's method. + + Parameters + ---------- + x : array_like + Time series of measurement values + y : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` and `y` time series. Defaults + to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap: int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the cross spectral density ('density') + where `Pxy` has units of V**2/Hz and computing the cross spectrum + ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are + measured in V and `fs` is measured in Hz. 
Defaults to 'density' + axis : int, optional + Axis along which the CSD is computed for both inputs; the + default is over the last axis (i.e. ``axis=-1``). + average : { 'mean', 'median' }, optional + Method to use when averaging periodograms. If the spectrum is + complex, the average is computed separately for the real and + imaginary parts. Defaults to 'mean'. + + .. versionadded:: 1.2.0 + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxy : ndarray + Cross spectral density or cross power spectrum of x,y. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. [Equivalent to + csd(x,x)] + coherence: Magnitude squared coherence by Welch's method. + + Notes + ----- + By convention, Pxy is computed with the conjugate FFT of X + multiplied by the FFT of Y. + + If the input series differ in length, the shorter series will be + zero-padded to match. + + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + Consult the :ref:`tutorial_SpectralAnalysis` section of the :ref:`user_guide` + for a discussion of the scalings of a spectral density and an (amplitude) spectrum. + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of + Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate two test signals with some common features. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 20 + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> b, a = signal.butter(2, 0.25, 'low') + >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape) + >>> y = signal.lfilter(b, a, x) + >>> x += amp*np.sin(2*np.pi*freq*time) + >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape) + + Compute and plot the magnitude of the cross spectral density. + + >>> f, Pxy = signal.csd(x, y, fs, nperseg=1024) + >>> plt.semilogy(f, np.abs(Pxy)) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('CSD [V**2/Hz]') + >>> plt.show() + + """ + freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, + nfft, detrend, return_onesided, scaling, + axis, mode='psd') + + # Average over windows. 
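+    # _spectral_helper returns one modified periodogram per segment, stacked
+    # along the last axis.  'mean' averaging gives the classic Welch estimate;
+    # 'median' averaging is more robust to outlier segments but is biased, so
+    # it is divided by the _median_bias correction factor to roughly match the
+    # mean-based estimate.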
+ if len(Pxy.shape) >= 2 and Pxy.size > 0: + if Pxy.shape[-1] > 1: + if average == 'median': + # np.median must be passed real arrays for the desired result + bias = _median_bias(Pxy.shape[-1]) + if np.iscomplexobj(Pxy): + Pxy = (np.median(np.real(Pxy), axis=-1) + + 1j * np.median(np.imag(Pxy), axis=-1)) + else: + Pxy = np.median(Pxy, axis=-1) + Pxy /= bias + elif average == 'mean': + Pxy = Pxy.mean(axis=-1) + else: + raise ValueError(f'average must be "median" or "mean", got {average}') + else: + Pxy = np.reshape(Pxy, Pxy.shape[:-1]) + + return freqs, Pxy + + +def spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1, mode='psd'): + """Compute a spectrogram with consecutive Fourier transforms (legacy function). + + Spectrograms can be used as a way of visualizing the change of a + nonstationary signal's frequency content over time. + + .. legacy:: function + + :class:`ShortTimeFFT` is a newer STFT / ISTFT implementation with more + features also including a :meth:`~ShortTimeFFT.spectrogram` method. + A :ref:`comparison ` between the + implementations can be found in the :ref:`tutorial_stft` section of + the :ref:`user_guide`. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + Defaults to a Tukey window with shape parameter of 0.25. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 8``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Sxx` has units of V**2/Hz and computing the power + spectrum ('spectrum') where `Sxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density'. + axis : int, optional + Axis along which the spectrogram is computed; the default is over + the last axis (i.e. ``axis=-1``). + mode : str, optional + Defines what kind of return values are expected. Options are + ['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is + equivalent to the output of `stft` with no padding or boundary + extension. 
'magnitude' returns the absolute magnitude of the + STFT. 'angle' and 'phase' return the complex angle of the STFT, + with and without unwrapping, respectively. + + Returns + ------- + f : ndarray + Array of sample frequencies. + t : ndarray + Array of segment times. + Sxx : ndarray + Spectrogram of x. By default, the last axis of Sxx corresponds + to the segment times. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. + csd: Cross spectral density by Welch's method. + ShortTimeFFT: Newer STFT/ISTFT implementation providing more features, + which also includes a :meth:`~ShortTimeFFT.spectrogram` + method. + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. In contrast to welch's method, where the + entire data stream is averaged over, one may wish to use a smaller + overlap (or perhaps none at all) when computing a spectrogram, to + maintain some statistical independence between individual segments. + It is for this reason that the default window is a Tukey window with + 1/8th of a window's length overlap at each end. + + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck + "Discrete-Time Signal Processing", Prentice Hall, 1999. + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fftshift + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave whose frequency is slowly + modulated around 3kHz, corrupted by white noise of exponentially + decreasing magnitude sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2 * np.sqrt(2) + >>> noise_power = 0.01 * fs / 2 + >>> time = np.arange(N) / float(fs) + >>> mod = 500*np.cos(2*np.pi*0.25*time) + >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod) + >>> noise = rng.normal(scale=np.sqrt(noise_power), size=time.shape) + >>> noise *= np.exp(-time/5) + >>> x = carrier + noise + + Compute and plot the spectrogram. 
+ + >>> f, t, Sxx = signal.spectrogram(x, fs) + >>> plt.pcolormesh(t, f, Sxx, shading='gouraud') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.show() + + Note, if using output that is not one sided, then use the following: + + >>> f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False) + >>> plt.pcolormesh(t, fftshift(f), fftshift(Sxx, axes=0), shading='gouraud') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.show() + + """ + modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase'] + if mode not in modelist: + raise ValueError(f'unknown value for mode {mode}, must be one of {modelist}') + + # need to set default for nperseg before setting default for noverlap below + window, nperseg = _triage_segments(window, nperseg, + input_length=x.shape[axis]) + + # Less overlap than welch, so samples are more statistically independent + if noverlap is None: + noverlap = nperseg // 8 + + if mode == 'psd': + freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, + noverlap, nfft, detrend, + return_onesided, scaling, axis, + mode='psd') + + else: + freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, + noverlap, nfft, detrend, + return_onesided, scaling, axis, + mode='stft') + + if mode == 'magnitude': + Sxx = np.abs(Sxx) + elif mode in ['angle', 'phase']: + Sxx = np.angle(Sxx) + if mode == 'phase': + # Sxx has one additional dimension for time strides + if axis < 0: + axis -= 1 + Sxx = np.unwrap(Sxx, axis=axis) + + # mode =='complex' is same as `stft`, doesn't need modification + + return freqs, time, Sxx + + +def check_COLA(window, nperseg, noverlap, tol=1e-10): + r"""Check whether the Constant OverLap Add (COLA) constraint is met. + + Parameters + ---------- + window : str or tuple or array_like + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + nperseg : int + Length of each segment. + noverlap : int + Number of points to overlap between segments. + tol : float, optional + The allowed variance of a bin's weighted sum from the median bin + sum. + + Returns + ------- + verdict : bool + `True` if chosen combination satisfies COLA within `tol`, + `False` otherwise + + See Also + -------- + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + stft: Short Time Fourier Transform + istft: Inverse Short Time Fourier Transform + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, it is sufficient that the signal windowing obeys the constraint of + "Constant OverLap Add" (COLA). This ensures that every point in the input + data is equally weighted, thereby avoiding aliasing and allowing full + reconstruction. + + Some examples of windows that satisfy COLA: + - Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ... + - Bartlett window at overlap of 1/2, 3/4, 5/6, ... + - Hann window at 1/2, 2/3, 3/4, ... + - Any Blackman family window at 2/3 overlap + - Any window with ``noverlap = nperseg-1`` + + A very comprehensive list of other windows may be found in [2]_, + wherein the COLA condition is satisfied when the "Amplitude + Flatness" is unity. + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Julius O. 
Smith III, "Spectral Audio Signal Processing", W3K + Publishing, 2011,ISBN 978-0-9745607-3-1. + .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and + spectral density estimation by the Discrete Fourier transform + (DFT), including a comprehensive list of window functions and + some new at-top windows", 2002, + http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 + + Examples + -------- + >>> from scipy import signal + + Confirm COLA condition for rectangular window of 75% (3/4) overlap: + + >>> signal.check_COLA(signal.windows.boxcar(100), 100, 75) + True + + COLA is not true for 25% (1/4) overlap, though: + + >>> signal.check_COLA(signal.windows.boxcar(100), 100, 25) + False + + "Symmetrical" Hann window (for filter design) is not COLA: + + >>> signal.check_COLA(signal.windows.hann(120, sym=True), 120, 60) + False + + "Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for + overlap of 1/2, 2/3, 3/4, etc.: + + >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 60) + True + + >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 80) + True + + >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 90) + True + + """ + nperseg = int(nperseg) + + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + noverlap = int(noverlap) + + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError('window must have length of nperseg') + + step = nperseg - noverlap + binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step)) + + if nperseg % step != 0: + binsums[:nperseg % step] += win[-(nperseg % step):] + + deviation = binsums - np.median(binsums) + return np.max(np.abs(deviation)) < tol + + +def check_NOLA(window, nperseg, noverlap, tol=1e-10): + r"""Check whether the Nonzero Overlap Add (NOLA) constraint is met. + + Parameters + ---------- + window : str or tuple or array_like + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + nperseg : int + Length of each segment. + noverlap : int + Number of points to overlap between segments. + tol : float, optional + The allowed variance of a bin's weighted sum from the median bin + sum. + + Returns + ------- + verdict : bool + `True` if chosen combination satisfies the NOLA constraint within + `tol`, `False` otherwise + + See Also + -------- + check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met + stft: Short Time Fourier Transform + istft: Inverse Short Time Fourier Transform + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, the signal windowing must obey the constraint of "nonzero + overlap add" (NOLA): + + .. math:: \sum_{t}w^{2}[n-tH] \ne 0 + + for all :math:`n`, where :math:`w` is the window function, :math:`t` is the + frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` - + `noverlap`). + + This ensures that the normalization factors in the denominator of the + overlap-add inversion equation are not zero. 
Only very pathological windows + will fail the NOLA constraint. + + .. versionadded:: 1.2.0 + + References + ---------- + .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K + Publishing, 2011,ISBN 978-0-9745607-3-1. + .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and + spectral density estimation by the Discrete Fourier transform + (DFT), including a comprehensive list of window functions and + some new at-top windows", 2002, + http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + + Confirm NOLA condition for rectangular window of 75% (3/4) overlap: + + >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 75) + True + + NOLA is also true for 25% (1/4) overlap: + + >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 25) + True + + "Symmetrical" Hann window (for filter design) is also NOLA: + + >>> signal.check_NOLA(signal.windows.hann(120, sym=True), 120, 60) + True + + As long as there is overlap, it takes quite a pathological window to fail + NOLA: + + >>> w = np.ones(64, dtype="float") + >>> w[::2] = 0 + >>> signal.check_NOLA(w, 64, 32) + False + + If there is not enough overlap, a window with zeros at the ends will not + work: + + >>> signal.check_NOLA(signal.windows.hann(64), 64, 0) + False + >>> signal.check_NOLA(signal.windows.hann(64), 64, 1) + False + >>> signal.check_NOLA(signal.windows.hann(64), 64, 2) + True + + """ + nperseg = int(nperseg) + + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg') + if noverlap < 0: + raise ValueError('noverlap must be a nonnegative integer') + noverlap = int(noverlap) + + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError('window must have length of nperseg') + + step = nperseg - noverlap + binsums = sum(win[ii*step:(ii+1)*step]**2 for ii in range(nperseg//step)) + + if nperseg % step != 0: + binsums[:nperseg % step] += win[-(nperseg % step):]**2 + + return np.min(binsums) > tol + + +def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None, + detrend=False, return_onesided=True, boundary='zeros', padded=True, + axis=-1, scaling='spectrum'): + r"""Compute the Short Time Fourier Transform (legacy function). + + STFTs can be used as a way of quantifying the change of a + nonstationary signal's frequency and phase content over time. + + .. legacy:: function + + `ShortTimeFFT` is a newer STFT / ISTFT implementation with more + features. A :ref:`comparison ` between the + implementations can be found in the :ref:`tutorial_stft` section of the + :ref:`user_guide`. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to 256. 
+ noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. When + specified, the COLA constraint must be met (see Notes below). + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to `False`. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + boundary : str or None, optional + Specifies whether the input signal is extended at both ends, and + how to generate the new values, in order to center the first + windowed segment on the first input point. This has the benefit + of enabling reconstruction of the first input point when the + employed window function starts at zero. Valid options are + ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to + 'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is + extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``. + padded : bool, optional + Specifies whether the input signal is zero-padded at the end to + make the signal fit exactly into an integer number of window + segments, so that all of the signal is included in the output. + Defaults to `True`. Padding occurs after boundary extension, if + `boundary` is not `None`, and `padded` is `True`, as is the + default. + axis : int, optional + Axis along which the STFT is computed; the default is over the + last axis (i.e. ``axis=-1``). + scaling: {'spectrum', 'psd'} + The default 'spectrum' scaling allows each frequency line of `Zxx` to + be interpreted as a magnitude spectrum. The 'psd' option scales each + line to a power spectral density - it allows to calculate the signal's + energy by numerically integrating over ``abs(Zxx)**2``. + + .. versionadded:: 1.9.0 + + Returns + ------- + f : ndarray + Array of sample frequencies. + t : ndarray + Array of segment times. + Zxx : ndarray + STFT of `x`. By default, the last axis of `Zxx` corresponds + to the segment times. + + See Also + -------- + istft: Inverse Short Time Fourier Transform + ShortTimeFFT: Newer STFT/ISTFT implementation providing more features. + check_COLA: Check whether the Constant OverLap Add (COLA) constraint + is met + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + welch: Power spectral density by Welch's method. + spectrogram: Spectrogram by Welch's method. + csd: Cross spectral density by Welch's method. + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, the signal windowing must obey the constraint of "Nonzero + OverLap Add" (NOLA), and the input signal must have complete + windowing coverage (i.e. ``(x.shape[axis] - nperseg) % + (nperseg-noverlap) == 0``). The `padded` argument may be used to + accomplish this. + + Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop + size :math:`H` = `nperseg - noverlap`, the windowed frame at time index + :math:`t` is given by + + .. 
math:: x_{t}[n]=x[n]w[n-tH] + + The overlap-add (OLA) reconstruction equation is given by + + .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]} + + The NOLA constraint ensures that every normalization term that appears + in the denominator of the OLA reconstruction equation is nonzero. Whether a + choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can + be tested with `check_NOLA`. + + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck + "Discrete-Time Signal Processing", Prentice Hall, 1999. + .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from + Modified Short-Time Fourier Transform", IEEE 1984, + 10.1109/TASSP.1984.1164317 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave whose frequency is slowly + modulated around 3kHz, corrupted by white noise of exponentially + decreasing magnitude sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2 * np.sqrt(2) + >>> noise_power = 0.01 * fs / 2 + >>> time = np.arange(N) / float(fs) + >>> mod = 500*np.cos(2*np.pi*0.25*time) + >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod) + >>> noise = rng.normal(scale=np.sqrt(noise_power), + ... size=time.shape) + >>> noise *= np.exp(-time/5) + >>> x = carrier + noise + + Compute and plot the STFT's magnitude. + + >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000) + >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud') + >>> plt.title('STFT Magnitude') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.show() + + Compare the energy of the signal `x` with the energy of its STFT: + + >>> E_x = sum(x**2) / fs # Energy of x + >>> # Calculate a two-sided STFT with PSD scaling: + >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000, return_onesided=False, + ... scaling='psd') + >>> # Integrate numerically over abs(Zxx)**2: + >>> df, dt = f[1] - f[0], t[1] - t[0] + >>> E_Zxx = sum(np.sum(Zxx.real**2 + Zxx.imag**2, axis=0) * df) * dt + >>> # The energy is the same, but the numerical errors are quite large: + >>> np.isclose(E_x, E_Zxx, rtol=1e-2) + True + + """ + if scaling == 'psd': + scaling = 'density' + elif scaling != 'spectrum': + raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") + + freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap, + nfft, detrend, return_onesided, + scaling=scaling, axis=axis, + mode='stft', boundary=boundary, + padded=padded) + + return freqs, time, Zxx + + +def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, + input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2, + scaling='spectrum'): + r"""Perform the inverse Short Time Fourier transform (legacy function). + + .. legacy:: function + + `ShortTimeFFT` is a newer STFT / ISTFT implementation with more + features. A :ref:`comparison ` between the + implementations can be found in the :ref:`tutorial_stft` section of the + :ref:`user_guide`. + + Parameters + ---------- + Zxx : array_like + STFT of the signal to be reconstructed. If a purely real array + is passed, it will be cast to a complex data type. + fs : float, optional + Sampling frequency of the time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. 
If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. Must match the window used to generate the + STFT for faithful inversion. + nperseg : int, optional + Number of data points corresponding to each STFT segment. This + parameter must be specified if the number of data points per + segment is odd, or if the STFT was padded via ``nfft > + nperseg``. If `None`, the value depends on the shape of + `Zxx` and `input_onesided`. If `input_onesided` is `True`, + ``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise, + ``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`. + noverlap : int, optional + Number of points to overlap between segments. If `None`, half + of the segment length. Defaults to `None`. When specified, the + COLA constraint must be met (see Notes below), and should match + the parameter used to generate the STFT. Defaults to `None`. + nfft : int, optional + Number of FFT points corresponding to each STFT segment. This + parameter must be specified if the STFT was padded via ``nfft > + nperseg``. If `None`, the default values are the same as for + `nperseg`, detailed above, with one exception: if + `input_onesided` is True and + ``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on + that value. This case allows the proper inversion of an + odd-length unpadded STFT using ``nfft=None``. Defaults to + `None`. + input_onesided : bool, optional + If `True`, interpret the input array as one-sided FFTs, such + as is returned by `stft` with ``return_onesided=True`` and + `numpy.fft.rfft`. If `False`, interpret the input as a a + two-sided FFT. Defaults to `True`. + boundary : bool, optional + Specifies whether the input signal was extended at its + boundaries by supplying a non-`None` ``boundary`` argument to + `stft`. Defaults to `True`. + time_axis : int, optional + Where the time segments of the STFT is located; the default is + the last axis (i.e. ``axis=-1``). + freq_axis : int, optional + Where the frequency axis of the STFT is located; the default is + the penultimate axis (i.e. ``axis=-2``). + scaling: {'spectrum', 'psd'} + The default 'spectrum' scaling allows each frequency line of `Zxx` to + be interpreted as a magnitude spectrum. The 'psd' option scales each + line to a power spectral density - it allows to calculate the signal's + energy by numerically integrating over ``abs(Zxx)**2``. + + Returns + ------- + t : ndarray + Array of output data times. + x : ndarray + iSTFT of `Zxx`. + + See Also + -------- + stft: Short Time Fourier Transform + ShortTimeFFT: Newer STFT/ISTFT implementation providing more features. + check_COLA: Check whether the Constant OverLap Add (COLA) constraint + is met + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT with + `istft`, the signal windowing must obey the constraint of "nonzero + overlap add" (NOLA): + + .. math:: \sum_{t}w^{2}[n-tH] \ne 0 + + This ensures that the normalization factors that appear in the denominator + of the overlap-add reconstruction equation + + .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]} + + are not zero. The NOLA constraint can be checked with the `check_NOLA` + function. 
+ + An STFT which has been modified (via masking or otherwise) is not + guaranteed to correspond to a exactly realizible signal. This + function implements the iSTFT via the least-squares estimation + algorithm detailed in [2]_, which produces a signal that minimizes + the mean squared error between the STFT of the returned signal and + the modified STFT. + + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck + "Discrete-Time Signal Processing", Prentice Hall, 1999. + .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from + Modified Short-Time Fourier Transform", IEEE 1984, + 10.1109/TASSP.1984.1164317 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by + 0.001 V**2/Hz of white noise sampled at 1024 Hz. + + >>> fs = 1024 + >>> N = 10*fs + >>> nperseg = 512 + >>> amp = 2 * np.sqrt(2) + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / float(fs) + >>> carrier = amp * np.sin(2*np.pi*50*time) + >>> noise = rng.normal(scale=np.sqrt(noise_power), + ... size=time.shape) + >>> x = carrier + noise + + Compute the STFT, and plot its magnitude + + >>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg) + >>> plt.figure() + >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud') + >>> plt.ylim([f[1], f[-1]]) + >>> plt.title('STFT Magnitude') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.yscale('log') + >>> plt.show() + + Zero the components that are 10% or less of the carrier magnitude, + then convert back to a time series via inverse STFT + + >>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0) + >>> _, xrec = signal.istft(Zxx, fs) + + Compare the cleaned signal with the original and true carrier signals. 
+ + >>> plt.figure() + >>> plt.plot(time, x, time, xrec, time, carrier) + >>> plt.xlim([2, 2.1]) + >>> plt.xlabel('Time [sec]') + >>> plt.ylabel('Signal') + >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) + >>> plt.show() + + Note that the cleaned signal does not start as abruptly as the original, + since some of the coefficients of the transient were also removed: + + >>> plt.figure() + >>> plt.plot(time, x, time, xrec, time, carrier) + >>> plt.xlim([0, 0.1]) + >>> plt.xlabel('Time [sec]') + >>> plt.ylabel('Signal') + >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) + >>> plt.show() + + """ + # Make sure input is an ndarray of appropriate complex dtype + Zxx = np.asarray(Zxx) + 0j + freq_axis = int(freq_axis) + time_axis = int(time_axis) + + if Zxx.ndim < 2: + raise ValueError('Input stft must be at least 2d!') + + if freq_axis == time_axis: + raise ValueError('Must specify differing time and frequency axes!') + + nseg = Zxx.shape[time_axis] + + if input_onesided: + # Assume even segment length + n_default = 2*(Zxx.shape[freq_axis] - 1) + else: + n_default = Zxx.shape[freq_axis] + + # Check windowing parameters + if nperseg is None: + nperseg = n_default + else: + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if nfft is None: + if (input_onesided) and (nperseg == n_default + 1): + # Odd nperseg, no FFT padding + nfft = nperseg + else: + nfft = n_default + elif nfft < nperseg: + raise ValueError('nfft must be greater than or equal to nperseg.') + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg//2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + nstep = nperseg - noverlap + + # Rearrange axes if necessary + if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2: + # Turn negative indices to positive for the call to transpose + if freq_axis < 0: + freq_axis = Zxx.ndim + freq_axis + if time_axis < 0: + time_axis = Zxx.ndim + time_axis + zouter = list(range(Zxx.ndim)) + for ax in sorted([time_axis, freq_axis], reverse=True): + zouter.pop(ax) + Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis]) + + # Get window as array + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError(f'window must have length of {nperseg}') + + ifunc = sp_fft.irfft if input_onesided else sp_fft.ifft + xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :] + + # Initialize output and normalization arrays + outputlength = nperseg + (nseg-1)*nstep + x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype) + norm = np.zeros(outputlength, dtype=xsubs.dtype) + + if np.result_type(win, xsubs) != xsubs.dtype: + win = win.astype(xsubs.dtype) + + if scaling == 'spectrum': + xsubs *= win.sum() + elif scaling == 'psd': + xsubs *= np.sqrt(fs * sum(win**2)) + else: + raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") + + # Construct the output from the ifft segments + # This loop could perhaps be vectorized/strided somehow... 
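+ # A vectorized alternative (sketch only, not used here): for 1-D `x` the
+ # overlap-add can be expressed with an unbuffered scatter-add, e.g.
+ #     idx = np.arange(nperseg)[:, None] + nstep * np.arange(nseg)
+ #     np.add.at(x, idx, xsubs * win[:, None])
+ #     np.add.at(norm, idx, np.broadcast_to(win[:, None]**2, idx.shape))
+ # `np.add.at` accumulates correctly at repeated indices; handling leading
+ # batch axes of `x` would need extra index bookkeeping, so the plain loop
+ # below is kept for clarity.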
+ for ii in range(nseg): + # Window the ifft + x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win + norm[..., ii*nstep:ii*nstep+nperseg] += win**2 + + # Remove extension points + if boundary: + x = x[..., nperseg//2:-(nperseg//2)] + norm = norm[..., nperseg//2:-(nperseg//2)] + + # Divide out normalization where non-tiny + if np.sum(norm > 1e-10) != len(norm): + warnings.warn( + "NOLA condition failed, STFT may not be invertible." + + (" Possibly due to missing boundary" if not boundary else ""), + stacklevel=2 + ) + x /= np.where(norm > 1e-10, norm, 1.0) + + if input_onesided: + x = x.real + + # Put axes back + if x.ndim > 1: + if time_axis != Zxx.ndim-1: + if freq_axis < time_axis: + time_axis -= 1 + x = np.moveaxis(x, -1, time_axis) + + time = np.arange(x.shape[0])/float(fs) + return time, x + + +def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, detrend='constant', axis=-1): + r""" + Estimate the magnitude squared coherence estimate, Cxy, of + discrete-time signals X and Y using Welch's method. + + ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power + spectral density estimates of X and Y, and `Pxy` is the cross + spectral density estimate of X and Y. + + Parameters + ---------- + x : array_like + Time series of measurement values + y : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` and `y` time series. Defaults + to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap: int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + axis : int, optional + Axis along which the coherence is computed for both inputs; the + default is over the last axis (i.e. ``axis=-1``). + + Returns + ------- + f : ndarray + Array of sample frequencies. + Cxy : ndarray + Magnitude squared coherence of x and y. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. + csd: Cross spectral density by Welch's method. + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + .. versionadded:: 0.16.0 + + References + ---------- + .. 
[1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of + Signals" Prentice Hall, 2005 + + Examples + -------- + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + + Generate two test signals with some common features. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 20 + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> b, a = signal.butter(2, 0.25, 'low') + >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape) + >>> y = signal.lfilter(b, a, x) + >>> x += amp*np.sin(2*np.pi*freq*time) + >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape) + + Compute and plot the coherence. + + >>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024) + >>> plt.semilogy(f, Cxy) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Coherence') + >>> plt.show() + + """ + freqs, Pxx = welch(x, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, + axis=axis) + _, Pyy = welch(y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, + nfft=nfft, detrend=detrend, axis=axis) + _, Pxy = csd(x, y, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis) + + Cxy = np.abs(Pxy)**2 / Pxx / Pyy + + return freqs, Cxy + + +def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1, mode='psd', boundary=None, + padded=False): + """Calculate various forms of windowed FFTs for PSD, CSD, etc. + + This is a helper function that implements the commonality between + the stft, psd, csd, and spectrogram functions. It is not designed to + be called externally. The windows are not averaged over; the result + from each window is returned. + + Parameters + ---------- + x : array_like + Array or sequence containing the data to be analyzed. + y : array_like + Array or sequence containing the data to be analyzed. If this is + the same object in memory as `x` (i.e. ``_spectral_helper(x, + x, ...)``), the extra computations are spared. + fs : float, optional + Sampling frequency of the time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. 
If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the cross spectral density ('density') + where `Pxy` has units of V**2/Hz and computing the cross + spectrum ('spectrum') where `Pxy` has units of V**2, if `x` + and `y` are measured in V and `fs` is measured in Hz. + Defaults to 'density' + axis : int, optional + Axis along which the FFTs are computed; the default is over the + last axis (i.e. ``axis=-1``). + mode: str {'psd', 'stft'}, optional + Defines what kind of return values are expected. Defaults to + 'psd'. + boundary : str or None, optional + Specifies whether the input signal is extended at both ends, and + how to generate the new values, in order to center the first + windowed segment on the first input point. This has the benefit + of enabling reconstruction of the first input point when the + employed window function starts at zero. Valid options are + ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to + `None`. + padded : bool, optional + Specifies whether the input signal is zero-padded at the end to + make the signal fit exactly into an integer number of window + segments, so that all of the signal is included in the output. + Defaults to `False`. Padding occurs after boundary extension, if + `boundary` is not `None`, and `padded` is `True`. + + Returns + ------- + freqs : ndarray + Array of sample frequencies. + t : ndarray + Array of times corresponding to each data segment + result : ndarray + Array of output data, contents dependent on *mode* kwarg. + + Notes + ----- + Adapted from matplotlib.mlab + + .. versionadded:: 0.16.0 + """ + if mode not in ['psd', 'stft']: + raise ValueError(f"Unknown value for mode {mode}, must be one of: " + "{'psd', 'stft'}") + + boundary_funcs = {'even': even_ext, + 'odd': odd_ext, + 'constant': const_ext, + 'zeros': zero_ext, + None: None} + + if boundary not in boundary_funcs: + raise ValueError(f"Unknown boundary option '{boundary}', " + f"must be one of: {list(boundary_funcs.keys())}") + + # If x and y are the same object we can save ourselves some computation. 
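+ # Note that this is an identity check, not an equality check: callers such
+ # as `stft`, which calls ``_spectral_helper(x, x, ...)``, take the
+ # auto-spectrum fast path, while an equal-valued copy of `x` would not.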
+ same_data = y is x + + if not same_data and mode != 'psd': + raise ValueError("x and y must be equal if mode is 'stft'") + + axis = int(axis) + + # Ensure we have np.arrays, get outdtype + x = np.asarray(x) + if not same_data: + y = np.asarray(y) + outdtype = np.result_type(x, y, np.complex64) + else: + outdtype = np.result_type(x, np.complex64) + + if not same_data: + # Check if we can broadcast the outer axes together + xouter = list(x.shape) + youter = list(y.shape) + xouter.pop(axis) + youter.pop(axis) + try: + outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape + except ValueError as e: + raise ValueError('x and y cannot be broadcast together.') from e + + if same_data: + if x.size == 0: + return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape) + else: + if x.size == 0 or y.size == 0: + outshape = outershape + (min([x.shape[axis], y.shape[axis]]),) + emptyout = np.moveaxis(np.empty(outshape), -1, axis) + return emptyout, emptyout, emptyout + + if x.ndim > 1: + if axis != -1: + x = np.moveaxis(x, axis, -1) + if not same_data and y.ndim > 1: + y = np.moveaxis(y, axis, -1) + + # Check if x and y are the same length, zero-pad if necessary + if not same_data: + if x.shape[-1] != y.shape[-1]: + if x.shape[-1] < y.shape[-1]: + pad_shape = list(x.shape) + pad_shape[-1] = y.shape[-1] - x.shape[-1] + x = np.concatenate((x, np.zeros(pad_shape)), -1) + else: + pad_shape = list(y.shape) + pad_shape[-1] = x.shape[-1] - y.shape[-1] + y = np.concatenate((y, np.zeros(pad_shape)), -1) + + if nperseg is not None: # if specified by user + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + # parse window; if array like, then set nperseg = win.shape + win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1]) + + if nfft is None: + nfft = nperseg + elif nfft < nperseg: + raise ValueError('nfft must be greater than or equal to nperseg.') + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg//2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + nstep = nperseg - noverlap + + # Padding occurs after boundary extension, so that the extended signal ends + # in zeros, instead of introducing an impulse at the end. + # I.e. if x = [..., 3, 2] + # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0] + # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3] + + if boundary is not None: + ext_func = boundary_funcs[boundary] + x = ext_func(x, nperseg//2, axis=-1) + if not same_data: + y = ext_func(y, nperseg//2, axis=-1) + + if padded: + # Pad to integer number of windowed segments + # I.e. make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg + nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg + zeros_shape = list(x.shape[:-1]) + [nadd] + x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1) + if not same_data: + zeros_shape = list(y.shape[:-1]) + [nadd] + y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1) + + # Handle detrending and window functions + if not detrend: + def detrend_func(d): + return d + elif not hasattr(detrend, '__call__'): + def detrend_func(d): + return _signaltools.detrend(d, type=detrend, axis=-1) + elif axis != -1: + # Wrap this function so that it receives a shape that it could + # reasonably expect to receive. 
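+ # A user-supplied callable expects the data to lie along `axis`, whereas the
+ # segments produced below always carry the data on the last axis, so move
+ # the axis over before the call and back afterwards.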
+ def detrend_func(d): + d = np.moveaxis(d, -1, axis) + d = detrend(d) + return np.moveaxis(d, axis, -1) + else: + detrend_func = detrend + + if np.result_type(win, np.complex64) != outdtype: + win = win.astype(outdtype) + + if scaling == 'density': + scale = 1.0 / (fs * (win*win).sum()) + elif scaling == 'spectrum': + scale = 1.0 / win.sum()**2 + else: + raise ValueError(f'Unknown scaling: {scaling!r}') + + if mode == 'stft': + scale = np.sqrt(scale) + + if return_onesided: + if np.iscomplexobj(x): + sides = 'twosided' + warnings.warn('Input data is complex, switching to return_onesided=False', + stacklevel=3) + else: + sides = 'onesided' + if not same_data: + if np.iscomplexobj(y): + sides = 'twosided' + warnings.warn('Input data is complex, switching to ' + 'return_onesided=False', + stacklevel=3) + else: + sides = 'twosided' + + if sides == 'twosided': + freqs = sp_fft.fftfreq(nfft, 1/fs) + elif sides == 'onesided': + freqs = sp_fft.rfftfreq(nfft, 1/fs) + + # Perform the windowed FFTs + result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides) + + if not same_data: + # All the same operations on the y data + result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft, + sides) + result = np.conjugate(result) * result_y + elif mode == 'psd': + result = np.conjugate(result) * result + + result *= scale + if sides == 'onesided' and mode == 'psd': + if nfft % 2: + result[..., 1:] *= 2 + else: + # Last point is unpaired Nyquist freq point, don't double + result[..., 1:-1] *= 2 + + time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, + nperseg - noverlap)/float(fs) + if boundary is not None: + time -= (nperseg/2) / fs + + result = result.astype(outdtype) + + # All imaginary parts are zero anyways + if same_data and mode != 'stft': + result = result.real + + # Output is going to have new last axis for time/window index, so a + # negative axis index shifts down one + if axis < 0: + axis -= 1 + + # Roll frequency axis back to axis where the data came from + result = np.moveaxis(result, -1, axis) + + return freqs, time, result + + +def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides): + """ + Calculate windowed FFT, for internal use by + `scipy.signal._spectral_helper`. + + This is a helper function that does the main FFT calculation for + `_spectral helper`. All input validation is performed there, and the + data axis is assumed to be the last axis of x. It is not designed to + be called externally. The windows are not averaged over; the result + from each window is returned. + + Returns + ------- + result : ndarray + Array of FFT data + + Notes + ----- + Adapted from matplotlib.mlab + + .. versionadded:: 0.16.0 + """ + # Created sliding window view of array + if nperseg == 1 and noverlap == 0: + result = x[..., np.newaxis] + else: + step = nperseg - noverlap + result = np.lib.stride_tricks.sliding_window_view( + x, window_shape=nperseg, axis=-1, writeable=True + ) + result = result[..., 0::step, :] + + # Detrend each data segment individually + result = detrend_func(result) + + # Apply window by multiplication + result = win * result + + # Perform the fft. Acts on last axis by default. Zero-pads automatically + if sides == 'twosided': + func = sp_fft.fft + else: + result = result.real + func = sp_fft.rfft + result = func(result, n=nfft) + + return result + + +def _triage_segments(window, nperseg, input_length): + """ + Parses window and nperseg arguments for spectrogram and _spectral_helper. 
+ This is a helper function, not meant to be called externally. + + Parameters + ---------- + window : string, tuple, or ndarray + If window is specified by a string or tuple and nperseg is not + specified, nperseg is set to the default of 256 and returns a window of + that length. + If instead the window is array_like and nperseg is not specified, then + nperseg is set to the length of the window. A ValueError is raised if + the user supplies both an array_like window and a value for nperseg but + nperseg does not equal the length of the window. + + nperseg : int + Length of each segment + + input_length: int + Length of input signal, i.e. x.shape[-1]. Used to test for errors. + + Returns + ------- + win : ndarray + window. If function was called with string or tuple than this will hold + the actual array used as a window. + + nperseg : int + Length of each segment. If window is str or tuple, nperseg is set to + 256. If window is array_like, nperseg is set to the length of the + window. + """ + # parse window; if array like, then set nperseg = win.shape + if isinstance(window, str) or isinstance(window, tuple): + # if nperseg not specified + if nperseg is None: + nperseg = 256 # then change to default + if nperseg > input_length: + warnings.warn(f'nperseg = {nperseg:d} is greater than input length ' + f' = {input_length:d}, using nperseg = {input_length:d}', + stacklevel=3) + nperseg = input_length + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if input_length < win.shape[-1]: + raise ValueError('window is longer than input signal') + if nperseg is None: + nperseg = win.shape[0] + elif nperseg is not None: + if nperseg != win.shape[0]: + raise ValueError("value specified for nperseg is different" + " from length of window") + return win, nperseg + + +def _median_bias(n): + """ + Returns the bias of the median of a set of periodograms relative to + the mean. + + See Appendix B from [1]_ for details. + + Parameters + ---------- + n : int + Numbers of periodograms being averaged. + + Returns + ------- + bias : float + Calculated bias. + + References + ---------- + .. [1] B. Allen, W.G. Anderson, P.R. Brady, D.A. Brown, J.D.E. Creighton. + "FINDCHIRP: an algorithm for detection of gravitational waves from + inspiraling compact binaries", Physical Review D 85, 2012, + :arxiv:`gr-qc/0509116` + """ + ii_2 = 2 * np.arange(1., (n-1) // 2 + 1) + return 1 + np.sum(1. / (ii_2 + 1) - 1. 
/ ii_2) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c776c84deedabf62573fdee3f6b0bea1089d0092 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.pyi b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c4225577db7ea188a2add225ecec1fbec855de06 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.pyi @@ -0,0 +1,34 @@ + +import numpy as np +from numpy.typing import NDArray + +FloatingArray = NDArray[np.float32] | NDArray[np.float64] +ComplexArray = NDArray[np.complex64] | NDArray[np.complex128] +FloatingComplexArray = FloatingArray | ComplexArray + + +def symiirorder1_ic(signal: FloatingComplexArray, + c0: float, + z1: float, + precision: float) -> FloatingComplexArray: + ... + + +def symiirorder2_ic_fwd(signal: FloatingArray, + r: float, + omega: float, + precision: float) -> FloatingArray: + ... + + +def symiirorder2_ic_bwd(signal: FloatingArray, + r: float, + omega: float, + precision: float) -> FloatingArray: + ... + + +def sepfir2d(input: FloatingComplexArray, + hrow: FloatingComplexArray, + hcol: FloatingComplexArray) -> FloatingComplexArray: + ... diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline_filters.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline_filters.py new file mode 100644 index 0000000000000000000000000000000000000000..eb7884c5cc5544e9fc6d37476016e7a2d0e81d57 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline_filters.py @@ -0,0 +1,808 @@ +from numpy import (asarray, pi, zeros_like, + array, arctan2, tan, ones, arange, floor, + r_, atleast_1d, sqrt, exp, greater, cos, add, sin, + moveaxis, abs, arctan, complex64, float32) +import numpy as np + +from scipy._lib._util import normalize_axis_index + +# From splinemodule.c +from ._spline import sepfir2d, symiirorder1_ic, symiirorder2_ic_fwd, symiirorder2_ic_bwd +from ._signaltools import lfilter, sosfilt, lfiltic +from ._arraytools import axis_slice, axis_reverse + +from scipy.interpolate import BSpline + + +__all__ = ['spline_filter', 'gauss_spline', + 'cspline1d', 'qspline1d', 'qspline2d', 'cspline2d', + 'cspline1d_eval', 'qspline1d_eval', 'symiirorder1', 'symiirorder2'] + + +def spline_filter(Iin, lmbda=5.0): + """Smoothing spline (cubic) filtering of a rank-2 array. + + Filter an input data set, `Iin`, using a (cubic) smoothing spline of + fall-off `lmbda`. + + Parameters + ---------- + Iin : array_like + input data set + lmbda : float, optional + spline smoothing fall-off value, default is `5.0`. + + Returns + ------- + res : ndarray + filtered input data + + Examples + -------- + We can filter an multi dimensional signal (ex: 2D image) using cubic + B-spline filter: + + >>> import numpy as np + >>> from scipy.signal import spline_filter + >>> import matplotlib.pyplot as plt + >>> orig_img = np.eye(20) # create an image + >>> orig_img[10, :] = 1.0 + >>> sp_filter = spline_filter(orig_img, lmbda=0.1) + >>> f, ax = plt.subplots(1, 2, sharex=True) + >>> for ind, data in enumerate([[orig_img, "original image"], + ... [sp_filter, "spline filter"]]): + ... 
ax[ind].imshow(data[0], cmap='gray_r') + ... ax[ind].set_title(data[1]) + >>> plt.tight_layout() + >>> plt.show() + + """ + if Iin.dtype not in [np.float32, np.float64, np.complex64, np.complex128]: + raise TypeError(f"Invalid data type for Iin: {Iin.dtype = }") + + # XXX: note that complex-valued computations are done in single precision + # this is historic, and the root reason is unclear, + # see https://github.com/scipy/scipy/issues/9209 + # Attempting to work in complex double precision leads to symiirorder1 + # failing to converge for the boundary conditions. + intype = Iin.dtype + hcol = array([1.0, 4.0, 1.0], np.float32) / 6.0 + if intype == np.complex128: + Iin = Iin.astype(np.complex64) + + ck = cspline2d(Iin, lmbda) + out = sepfir2d(ck, hcol, hcol) + out = out.astype(intype) + return out + + +_splinefunc_cache = {} + + +def gauss_spline(x, n): + r"""Gaussian approximation to B-spline basis function of order n. + + Parameters + ---------- + x : array_like + a knot vector + n : int + The order of the spline. Must be non-negative, i.e., n >= 0 + + Returns + ------- + res : ndarray + B-spline basis function values approximated by a zero-mean Gaussian + function. + + Notes + ----- + The B-spline basis function can be approximated well by a zero-mean + Gaussian function with standard-deviation equal to :math:`\sigma=(n+1)/12` + for large `n` : + + .. math:: \frac{1}{\sqrt {2\pi\sigma^2}}exp(-\frac{x^2}{2\sigma}) + + References + ---------- + .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen + F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In: + Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational + Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer + Science, vol 4485. Springer, Berlin, Heidelberg + .. 
[2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html + + Examples + -------- + We can calculate B-Spline basis functions approximated by a gaussian + distribution: + + >>> import numpy as np + >>> from scipy.signal import gauss_spline + >>> knots = np.array([-1.0, 0.0, -1.0]) + >>> gauss_spline(knots, 3) + array([0.15418033, 0.6909883, 0.15418033]) # may vary + + """ + x = asarray(x) + signsq = (n + 1) / 12.0 + return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq) + + +def _cubic(x): + x = asarray(x, dtype=float) + b = BSpline.basis_element([-2, -1, 0, 1, 2], extrapolate=False) + out = b(x) + out[(x < -2) | (x > 2)] = 0 + return out + + +def _quadratic(x): + x = abs(asarray(x, dtype=float)) + b = BSpline.basis_element([-1.5, -0.5, 0.5, 1.5], extrapolate=False) + out = b(x) + out[(x < -1.5) | (x > 1.5)] = 0 + return out + + +def _coeff_smooth(lam): + xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam) + omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi)) + rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam) + rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi) + return rho, omeg + + +def _hc(k, cs, rho, omega): + return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) * + greater(k, -1)) + + +def _hs(k, cs, rho, omega): + c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) / + (1 - 2 * rho * rho * cos(2 * omega) + rho ** 4)) + gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega) + ak = abs(k) + return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak)) + + +def _cubic_smooth_coeff(signal, lamb): + rho, omega = _coeff_smooth(lamb) + cs = 1 - 2 * rho * cos(omega) + rho * rho + K = len(signal) + k = arange(K) + + zi_2 = (_hc(0, cs, rho, omega) * signal[0] + + add.reduce(_hc(k + 1, cs, rho, omega) * signal)) + zi_1 = (_hc(0, cs, rho, omega) * signal[0] + + _hc(1, cs, rho, omega) * signal[1] + + add.reduce(_hc(k + 2, cs, rho, omega) * signal)) + + # Forward filter: + # for n in range(2, K): + # yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] - + # rho * rho * yp[n - 2]) + zi = lfiltic(cs, r_[1, -2 * rho * cos(omega), rho * rho], r_[zi_1, zi_2]) + zi = zi.reshape(1, -1) + + sos = r_[cs, 0, 0, 1, -2 * rho * cos(omega), rho * rho] + sos = sos.reshape(1, -1) + + yp, _ = sosfilt(sos, signal[2:], zi=zi) + yp = r_[zi_2, zi_1, yp] + + # Reverse filter: + # for n in range(K - 3, -1, -1): + # y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] - + # rho * rho * y[n + 2]) + + zi_2 = add.reduce((_hs(k, cs, rho, omega) + + _hs(k + 1, cs, rho, omega)) * signal[::-1]) + zi_1 = add.reduce((_hs(k - 1, cs, rho, omega) + + _hs(k + 2, cs, rho, omega)) * signal[::-1]) + + zi = lfiltic(cs, r_[1, -2 * rho * cos(omega), rho * rho], r_[zi_1, zi_2]) + zi = zi.reshape(1, -1) + y, _ = sosfilt(sos, yp[-3::-1], zi=zi) + y = r_[y[::-1], zi_1, zi_2] + return y + + +def _cubic_coeff(signal): + zi = -2 + sqrt(3) + K = len(signal) + powers = zi ** arange(K) + + if K == 1: + yplus = signal[0] + zi * add.reduce(powers * signal) + output = zi / (zi - 1) * yplus + return atleast_1d(output) + + # Forward filter: + # yplus[0] = signal[0] + zi * add.reduce(powers * signal) + # for k in range(1, K): + # yplus[k] = signal[k] + zi * yplus[k - 1] + + state = lfiltic(1, r_[1, -zi], atleast_1d(add.reduce(powers * signal))) + + b = ones(1) + a = r_[1, -zi] + yplus, _ = lfilter(b, a, signal, zi=state) + + # Reverse filter: + # output[K - 1] = zi / (zi - 1) * yplus[K - 1] + # for k in range(K - 2, -1, -1): + # output[k] = zi * (output[k + 1] - yplus[k]) + out_last = zi / (zi - 1) * 
yplus[K - 1] + state = lfiltic(-zi, r_[1, -zi], atleast_1d(out_last)) + + b = asarray([-zi]) + output, _ = lfilter(b, a, yplus[-2::-1], zi=state) + output = r_[output[::-1], out_last] + return output * 6.0 + + +def _quadratic_coeff(signal): + zi = -3 + 2 * sqrt(2.0) + K = len(signal) + powers = zi ** arange(K) + + if K == 1: + yplus = signal[0] + zi * add.reduce(powers * signal) + output = zi / (zi - 1) * yplus + return atleast_1d(output) + + # Forward filter: + # yplus[0] = signal[0] + zi * add.reduce(powers * signal) + # for k in range(1, K): + # yplus[k] = signal[k] + zi * yplus[k - 1] + + state = lfiltic(1, r_[1, -zi], atleast_1d(add.reduce(powers * signal))) + + b = ones(1) + a = r_[1, -zi] + yplus, _ = lfilter(b, a, signal, zi=state) + + # Reverse filter: + # output[K - 1] = zi / (zi - 1) * yplus[K - 1] + # for k in range(K - 2, -1, -1): + # output[k] = zi * (output[k + 1] - yplus[k]) + out_last = zi / (zi - 1) * yplus[K - 1] + state = lfiltic(-zi, r_[1, -zi], atleast_1d(out_last)) + + b = asarray([-zi]) + output, _ = lfilter(b, a, yplus[-2::-1], zi=state) + output = r_[output[::-1], out_last] + return output * 8.0 + + +def compute_root_from_lambda(lamb): + tmp = sqrt(3 + 144 * lamb) + xi = 1 - 96 * lamb + 24 * lamb * tmp + omega = arctan(sqrt((144 * lamb - 1.0) / xi)) + tmp2 = sqrt(xi) + r = ((24 * lamb - 1 - tmp2) / (24 * lamb) * + sqrt(48*lamb + 24 * lamb * tmp) / tmp2) + return r, omega + + +def cspline1d(signal, lamb=0.0): + """ + Compute cubic spline coefficients for rank-1 array. + + Find the cubic spline coefficients for a 1-D signal assuming + mirror-symmetric boundary conditions. To obtain the signal back from the + spline representation mirror-symmetric-convolve these coefficients with a + length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 . + + Parameters + ---------- + signal : ndarray + A rank-1 array representing samples of a signal. + lamb : float, optional + Smoothing coefficient, default is 0.0. + + Returns + ------- + c : ndarray + Cubic spline coefficients. + + See Also + -------- + cspline1d_eval : Evaluate a cubic spline at the new set of points. + + Examples + -------- + We can filter a signal to reduce and smooth out high-frequency noise with + a cubic spline: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import cspline1d, cspline1d_eval + >>> rng = np.random.default_rng() + >>> sig = np.repeat([0., 1., 0.], 100) + >>> sig += rng.standard_normal(len(sig))*0.05 # add noise + >>> time = np.linspace(0, len(sig)) + >>> filtered = cspline1d_eval(cspline1d(sig), time) + >>> plt.plot(sig, label="signal") + >>> plt.plot(time, filtered, label="filtered") + >>> plt.legend() + >>> plt.show() + + """ + if lamb != 0.0: + return _cubic_smooth_coeff(signal, lamb) + else: + return _cubic_coeff(signal) + + +def qspline1d(signal, lamb=0.0): + """Compute quadratic spline coefficients for rank-1 array. + + Parameters + ---------- + signal : ndarray + A rank-1 array representing samples of a signal. + lamb : float, optional + Smoothing coefficient (must be zero for now). + + Returns + ------- + c : ndarray + Quadratic spline coefficients. + + See Also + -------- + qspline1d_eval : Evaluate a quadratic spline at the new set of points. + + Notes + ----- + Find the quadratic spline coefficients for a 1-D signal assuming + mirror-symmetric boundary conditions. To obtain the signal back from the + spline representation mirror-symmetric-convolve these coefficients with a + length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 . 
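+
+ As a sketch of that reconstruction for an arbitrary 1-D float array ``sig``
+ (interior samples only, since a plain convolution ignores the
+ mirror-symmetric boundary handling)::
+
+ c = qspline1d(sig)
+ rec = np.convolve(c, np.array([1., 6., 1.]) / 8., mode='same')
+ # rec[1:-1] matches sig[1:-1] to numerical precision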
+ + Examples + -------- + We can filter a signal to reduce and smooth out high-frequency noise with + a quadratic spline: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import qspline1d, qspline1d_eval + >>> rng = np.random.default_rng() + >>> sig = np.repeat([0., 1., 0.], 100) + >>> sig += rng.standard_normal(len(sig))*0.05 # add noise + >>> time = np.linspace(0, len(sig)) + >>> filtered = qspline1d_eval(qspline1d(sig), time) + >>> plt.plot(sig, label="signal") + >>> plt.plot(time, filtered, label="filtered") + >>> plt.legend() + >>> plt.show() + + """ + if lamb != 0.0: + raise ValueError("Smoothing quadratic splines not supported yet.") + else: + return _quadratic_coeff(signal) + + +def collapse_2d(x, axis): + x = moveaxis(x, axis, -1) + x_shape = x.shape + x = x.reshape(-1, x.shape[-1]) + if not x.flags.c_contiguous: + x = x.copy() + return x, x_shape + + +def symiirorder_nd(func, input, *args, axis=-1, **kwargs): + axis = normalize_axis_index(axis, input.ndim) + input_shape = input.shape + input_ndim = input.ndim + if input.ndim > 1: + input, input_shape = collapse_2d(input, axis) + + out = func(input, *args, **kwargs) + + if input_ndim > 1: + out = out.reshape(input_shape) + out = moveaxis(out, -1, axis) + if not out.flags.c_contiguous: + out = out.copy() + return out + + +def qspline2d(signal, lamb=0.0, precision=-1.0): + """ + Coefficients for 2-D quadratic (2nd order) B-spline. + + Return the second-order B-spline coefficients over a regularly spaced + input grid for the two-dimensional input image. + + Parameters + ---------- + input : ndarray + The input signal. + lamb : float + Specifies the amount of smoothing in the transfer function. + precision : float + Specifies the precision for computing the infinite sum needed to apply + mirror-symmetric boundary conditions. + + Returns + ------- + output : ndarray + The filtered signal. + """ + if precision < 0.0 or precision >= 1.0: + if signal.dtype in [float32, complex64]: + precision = 1e-3 + else: + precision = 1e-6 + + if lamb > 0: + raise ValueError('lambda must be negative or zero') + + # normal quadratic spline + r = -3 + 2 * sqrt(2.0) + c0 = -r * 8.0 + z1 = r + + out = symiirorder_nd(symiirorder1, signal, c0, z1, precision, axis=-1) + out = symiirorder_nd(symiirorder1, out, c0, z1, precision, axis=0) + return out + + +def cspline2d(signal, lamb=0.0, precision=-1.0): + """ + Coefficients for 2-D cubic (3rd order) B-spline. + + Return the third-order B-spline coefficients over a regularly spaced + input grid for the two-dimensional input image. + + Parameters + ---------- + input : ndarray + The input signal. + lamb : float + Specifies the amount of smoothing in the transfer function. + precision : float + Specifies the precision for computing the infinite sum needed to apply + mirror-symmetric boundary conditions. + + Returns + ------- + output : ndarray + The filtered signal. 
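+
+ Examples
+ --------
+ A minimal usage sketch (array values omitted). With the default
+ ``lamb=0.0`` the coefficients can be mapped back to the image by
+ mirror-symmetric separable filtering with ``[1., 4., 1.]/6.``, the same
+ step `spline_filter` performs after calling this function:
+
+ >>> import numpy as np
+ >>> from scipy.signal import cspline2d, sepfir2d
+ >>> rng = np.random.default_rng()
+ >>> img = rng.standard_normal((32, 32))
+ >>> coeffs = cspline2d(img)
+ >>> h = np.array([1., 4., 1.]) / 6.
+ >>> rec = sepfir2d(coeffs, h, h)  # approximately recovers img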
+ """ + if precision < 0.0 or precision >= 1.0: + if signal.dtype in [float32, complex64]: + precision = 1e-3 + else: + precision = 1e-6 + + if lamb <= 1 / 144.0: + # Normal cubic spline + r = -2 + sqrt(3.0) + out = symiirorder_nd( + symiirorder1, signal, -r * 6.0, r, precision=precision, axis=-1) + out = symiirorder_nd( + symiirorder1, out, -r * 6.0, r, precision=precision, axis=0) + return out + + r, omega = compute_root_from_lambda(lamb) + out = symiirorder_nd(symiirorder2, signal, r, omega, + precision=precision, axis=-1) + out = symiirorder_nd(symiirorder2, out, r, omega, + precision=precision, axis=0) + return out + + +def cspline1d_eval(cj, newx, dx=1.0, x0=0): + """Evaluate a cubic spline at the new set of points. + + `dx` is the old sample-spacing while `x0` was the old origin. In + other-words the old-sample points (knot-points) for which the `cj` + represent spline coefficients were at equally-spaced points of: + + oldx = x0 + j*dx j=0...N-1, with N=len(cj) + + Edges are handled using mirror-symmetric boundary conditions. + + Parameters + ---------- + cj : ndarray + cublic spline coefficients + newx : ndarray + New set of points. + dx : float, optional + Old sample-spacing, the default value is 1.0. + x0 : int, optional + Old origin, the default value is 0. + + Returns + ------- + res : ndarray + Evaluated a cubic spline points. + + See Also + -------- + cspline1d : Compute cubic spline coefficients for rank-1 array. + + Examples + -------- + We can filter a signal to reduce and smooth out high-frequency noise with + a cubic spline: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import cspline1d, cspline1d_eval + >>> rng = np.random.default_rng() + >>> sig = np.repeat([0., 1., 0.], 100) + >>> sig += rng.standard_normal(len(sig))*0.05 # add noise + >>> time = np.linspace(0, len(sig)) + >>> filtered = cspline1d_eval(cspline1d(sig), time) + >>> plt.plot(sig, label="signal") + >>> plt.plot(time, filtered, label="filtered") + >>> plt.legend() + >>> plt.show() + + """ + newx = (asarray(newx) - x0) / float(dx) + res = zeros_like(newx, dtype=cj.dtype) + if res.size == 0: + return res + N = len(cj) + cond1 = newx < 0 + cond2 = newx > (N - 1) + cond3 = ~(cond1 | cond2) + # handle general mirror-symmetry + res[cond1] = cspline1d_eval(cj, -newx[cond1]) + res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) + newx = newx[cond3] + if newx.size == 0: + return res + result = zeros_like(newx, dtype=cj.dtype) + jlower = floor(newx - 2).astype(int) + 1 + for i in range(4): + thisj = jlower + i + indj = thisj.clip(0, N - 1) # handle edge cases + result += cj[indj] * _cubic(newx - thisj) + res[cond3] = result + return res + + +def qspline1d_eval(cj, newx, dx=1.0, x0=0): + """Evaluate a quadratic spline at the new set of points. + + Parameters + ---------- + cj : ndarray + Quadratic spline coefficients + newx : ndarray + New set of points. + dx : float, optional + Old sample-spacing, the default value is 1.0. + x0 : int, optional + Old origin, the default value is 0. + + Returns + ------- + res : ndarray + Evaluated a quadratic spline points. + + See Also + -------- + qspline1d : Compute quadratic spline coefficients for rank-1 array. + + Notes + ----- + `dx` is the old sample-spacing while `x0` was the old origin. 
In + other-words the old-sample points (knot-points) for which the `cj` + represent spline coefficients were at equally-spaced points of:: + + oldx = x0 + j*dx j=0...N-1, with N=len(cj) + + Edges are handled using mirror-symmetric boundary conditions. + + Examples + -------- + We can filter a signal to reduce and smooth out high-frequency noise with + a quadratic spline: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import qspline1d, qspline1d_eval + >>> rng = np.random.default_rng() + >>> sig = np.repeat([0., 1., 0.], 100) + >>> sig += rng.standard_normal(len(sig))*0.05 # add noise + >>> time = np.linspace(0, len(sig)) + >>> filtered = qspline1d_eval(qspline1d(sig), time) + >>> plt.plot(sig, label="signal") + >>> plt.plot(time, filtered, label="filtered") + >>> plt.legend() + >>> plt.show() + + """ + newx = (asarray(newx) - x0) / dx + res = zeros_like(newx) + if res.size == 0: + return res + N = len(cj) + cond1 = newx < 0 + cond2 = newx > (N - 1) + cond3 = ~(cond1 | cond2) + # handle general mirror-symmetry + res[cond1] = qspline1d_eval(cj, -newx[cond1]) + res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) + newx = newx[cond3] + if newx.size == 0: + return res + result = zeros_like(newx) + jlower = floor(newx - 1.5).astype(int) + 1 + for i in range(3): + thisj = jlower + i + indj = thisj.clip(0, N - 1) # handle edge cases + result += cj[indj] * _quadratic(newx - thisj) + res[cond3] = result + return res + + +def symiirorder1(signal, c0, z1, precision=-1.0): + """ + Implement a smoothing IIR filter with mirror-symmetric boundary conditions + using a cascade of first-order sections. + + The second section uses a reversed sequence. This implements a system with + the following transfer function and mirror-symmetric boundary conditions:: + + c0 + H(z) = --------------------- + (1-z1/z) (1 - z1 z) + + The resulting signal will have mirror symmetric boundary conditions + as well. + + Parameters + ---------- + signal : ndarray + The input signal. If 2D, then the filter will be applied in a batched + fashion across the last axis. + c0, z1 : scalar + Parameters in the transfer function. + precision : + Specifies the precision for calculating initial conditions + of the recursive filter based on mirror-symmetric input. + + Returns + ------- + output : ndarray + The filtered signal. + """ + if np.abs(z1) >= 1: + raise ValueError('|z1| must be less than 1.0') + + if signal.ndim > 2: + raise ValueError('Input must be 1D or 2D') + + squeeze_dim = False + if signal.ndim == 1: + signal = signal[None, :] + squeeze_dim = True + + if np.issubdtype(signal.dtype, np.integer): + signal = signal.astype(np.promote_types(signal.dtype, np.float32)) + + y0 = symiirorder1_ic(signal, z1, precision) + + # Apply first the system 1 / (1 - z1 * z^-1) + b = np.ones(1, dtype=signal.dtype) + a = np.r_[1, -z1] + a = a.astype(signal.dtype) + + # Compute the initial state for lfilter. + zii = y0 * z1 + + y1, _ = lfilter(b, a, axis_slice(signal, 1), zi=zii) + y1 = np.c_[y0, y1] + + # Compute backward symmetric condition and apply the system + # c0 / (1 - z1 * z) + b = np.asarray([c0], dtype=signal.dtype) + out_last = -c0 / (z1 - 1.0) * axis_slice(y1, -1) + + # Compute the initial state for lfilter. 
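+ # For this first-order section (``a = [1, -z1]``) the lfilter state matching
+ # a known previous output sample ``y_prev`` is ``z1 * y_prev`` (equivalently
+ # ``lfiltic(b, [1, -z1], [y_prev])``), which is why both the forward and the
+ # backward passes seed ``zi`` with a product by z1.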
+ zii = out_last * z1 + + # Apply the system c0 / (1 - z1 * z) by reversing the output of the previous stage + out, _ = lfilter(b, a, axis_slice(y1, -2, step=-1), zi=zii) + out = np.c_[axis_reverse(out), out_last] + + if squeeze_dim: + out = out[0] + + return out + + +def symiirorder2(input, r, omega, precision=-1.0): + """ + Implement a smoothing IIR filter with mirror-symmetric boundary conditions + using a cascade of second-order sections. + + The second section uses a reversed sequence. This implements the following + transfer function:: + + cs^2 + H(z) = --------------------------------------- + (1 - a2/z - a3/z^2) (1 - a2 z - a3 z^2 ) + + where:: + + a2 = 2 * r * cos(omega) + a3 = - r ** 2 + cs = 1 - 2 * r * cos(omega) + r ** 2 + + Parameters + ---------- + input : ndarray + The input signal. + r, omega : float + Parameters in the transfer function. + precision : float + Specifies the precision for calculating initial conditions + of the recursive filter based on mirror-symmetric input. + + Returns + ------- + output : ndarray + The filtered signal. + """ + if r >= 1.0: + raise ValueError('r must be less than 1.0') + + if input.ndim > 2: + raise ValueError('Input must be 1D or 2D') + + if not input.flags.c_contiguous: + input = input.copy() + + squeeze_dim = False + if input.ndim == 1: + input = input[None, :] + squeeze_dim = True + + if np.issubdtype(input.dtype, np.integer): + input = input.astype(np.promote_types(input.dtype, np.float32)) + + rsq = r * r + a2 = 2 * r * np.cos(omega) + a3 = -rsq + cs = np.atleast_1d(1 - 2 * r * np.cos(omega) + rsq) + sos = np.atleast_2d(np.r_[cs, 0, 0, 1, -a2, -a3]).astype(input.dtype) + + # Find the starting (forward) conditions. + ic_fwd = symiirorder2_ic_fwd(input, r, omega, precision) + + # Apply first the system cs / (1 - a2 * z^-1 - a3 * z^-2) + # Compute the initial conditions in the form expected by sosfilt + # coef = np.asarray([[a3, a2], [0, a3]], dtype=input.dtype) + coef = np.r_[a3, a2, 0, a3].reshape(2, 2).astype(input.dtype) + zi = np.matmul(coef, ic_fwd[:, :, None])[:, :, 0] + + y_fwd, _ = sosfilt(sos, axis_slice(input, 2), zi=zi[None]) + y_fwd = np.c_[ic_fwd, y_fwd] + + # Then compute the symmetric backward starting conditions + ic_bwd = symiirorder2_ic_bwd(input, r, omega, precision) + + # Apply the system cs / (1 - a2 * z^1 - a3 * z^2) + # Compute the initial conditions in the form expected by sosfilt + zi = np.matmul(coef, ic_bwd[:, :, None])[:, :, 0] + y, _ = sosfilt(sos, axis_slice(y_fwd, -3, step=-1), zi=zi[None]) + out = np.c_[axis_reverse(y), axis_reverse(ic_bwd)] + + if squeeze_dim: + out = out[0] + + return out diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_upfirdn.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_upfirdn.py new file mode 100644 index 0000000000000000000000000000000000000000..d64cc142ff194b1404e380507289ddbaffab3359 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_upfirdn.py @@ -0,0 +1,216 @@ +# Code adapted from "upfirdn" python library with permission: +# +# Copyright (c) 2009, Motorola, Inc +# +# All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of Motorola nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np + +from ._upfirdn_apply import _output_len, _apply, mode_enum + +__all__ = ['upfirdn', '_output_len'] + +_upfirdn_modes = [ + 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect', + 'antisymmetric', 'antireflect', 'line', +] + + +def _pad_h(h, up): + """Store coefficients in a transposed, flipped arrangement. + + For example, suppose upRate is 3, and the + input number of coefficients is 10, represented as h[0], ..., h[9]. + + Then the internal buffer will look like this:: + + h[9], h[6], h[3], h[0], // flipped phase 0 coefs + 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded) + 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded) + + """ + h_padlen = len(h) + (-len(h) % up) + h_full = np.zeros(h_padlen, h.dtype) + h_full[:len(h)] = h + h_full = h_full.reshape(-1, up).T[:, ::-1].ravel() + return h_full + + +def _check_mode(mode): + mode = mode.lower() + enum = mode_enum(mode) + return enum + + +class _UpFIRDn: + """Helper for resampling.""" + + def __init__(self, h, x_dtype, up, down): + h = np.asarray(h) + if h.ndim != 1 or h.size == 0: + raise ValueError('h must be 1-D with non-zero length') + self._output_type = np.result_type(h.dtype, x_dtype, np.float32) + h = np.asarray(h, self._output_type) + self._up = int(up) + self._down = int(down) + if self._up < 1 or self._down < 1: + raise ValueError('Both up and down must be >= 1') + # This both transposes, and "flips" each phase for filtering + self._h_trans_flip = _pad_h(h, self._up) + self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip) + self._h_len_orig = len(h) + + def apply_filter(self, x, axis=-1, mode='constant', cval=0): + """Apply the prepared filter to the specified axis of N-D signal x.""" + output_len = _output_len(self._h_len_orig, x.shape[axis], + self._up, self._down) + # Explicit use of np.int64 for output_shape dtype avoids OverflowError + # when allocating large array on platforms where intp is 32 bits. 
+ output_shape = np.asarray(x.shape, dtype=np.int64) + output_shape[axis] = output_len + out = np.zeros(output_shape, dtype=self._output_type, order='C') + axis = axis % x.ndim + mode = _check_mode(mode) + _apply(np.asarray(x, self._output_type), + self._h_trans_flip, out, + self._up, self._down, axis, mode, cval) + return out + + +def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0): + """Upsample, FIR filter, and downsample. + + Parameters + ---------- + h : array_like + 1-D FIR (finite-impulse response) filter coefficients. + x : array_like + Input signal array. + up : int, optional + Upsampling rate. Default is 1. + down : int, optional + Downsampling rate. Default is 1. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + mode : str, optional + The signal extension mode to use. The set + ``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to + modes provided by `numpy.pad`. ``"smooth"`` implements a smooth + extension by extending based on the slope of the last 2 points at each + end of the array. ``"antireflect"`` and ``"antisymmetric"`` are + anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode + `"line"` extends the signal based on a linear trend defined by the + first and last points along the ``axis``. + + .. versionadded:: 1.4.0 + cval : float, optional + The constant value to use when ``mode == "constant"``. + + .. versionadded:: 1.4.0 + + Returns + ------- + y : ndarray + The output signal array. Dimensions will be the same as `x` except + for along `axis`, which will change size according to the `h`, + `up`, and `down` parameters. + + Notes + ----- + The algorithm is an implementation of the block diagram shown on page 129 + of the Vaidyanathan text [1]_ (Figure 4.3-8d). + + The direct approach of upsampling by factor of P with zero insertion, + FIR filtering of length ``N``, and downsampling by factor of Q is + O(N*Q) per output sample. The polyphase implementation used here is + O(N/P). + + .. versionadded:: 0.18 + + References + ---------- + .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks, + Prentice Hall, 1993. + + Examples + -------- + Simple operations: + + >>> import numpy as np + >>> from scipy.signal import upfirdn + >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter + array([ 1., 2., 3., 2., 1.]) + >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion + array([ 1., 0., 0., 2., 0., 0., 3.]) + >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold + array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.]) + >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation + array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5]) + >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3 + array([ 0., 3., 6., 9.]) + >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3 + array([ 0. , 1. , 2.5, 4. , 5.5, 7. 
, 8.5]) + + Apply a single filter to multiple signals: + + >>> x = np.reshape(np.arange(8), (4, 2)) + >>> x + array([[0, 1], + [2, 3], + [4, 5], + [6, 7]]) + + Apply along the last dimension of ``x``: + + >>> h = [1, 1] + >>> upfirdn(h, x, 2) + array([[ 0., 0., 1., 1.], + [ 2., 2., 3., 3.], + [ 4., 4., 5., 5.], + [ 6., 6., 7., 7.]]) + + Apply along the 0th dimension of ``x``: + + >>> upfirdn(h, x, 2, axis=0) + array([[ 0., 1.], + [ 0., 1.], + [ 2., 3.], + [ 2., 3.], + [ 4., 5.], + [ 4., 5.], + [ 6., 7.], + [ 6., 7.]]) + """ + x = np.asarray(x) + ufd = _UpFIRDn(h, x.dtype, up, down) + # This is equivalent to (but faster than) using np.apply_along_axis + return ufd.apply_filter(x, axis, mode, cval) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_waveforms.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_waveforms.py new file mode 100644 index 0000000000000000000000000000000000000000..a6be46cfd38674ee8c3ae89c9762461440c1e620 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_waveforms.py @@ -0,0 +1,696 @@ +# Author: Travis Oliphant +# 2003 +# +# Feb. 2010: Updated by Warren Weckesser: +# Rewrote much of chirp() +# Added sweep_poly() +import numpy as np +from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \ + exp, cos, sin, polyval, polyint + + +__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', + 'unit_impulse'] + + +def sawtooth(t, width=1): + """ + Return a periodic sawtooth or triangle waveform. + + The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the + interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval + ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1]. + + Note that this is not band-limited. It produces an infinite number + of harmonics, which are aliased back and forth across the frequency + spectrum. + + Parameters + ---------- + t : array_like + Time. + width : array_like, optional + Width of the rising ramp as a proportion of the total cycle. + Default is 1, producing a rising ramp, while 0 produces a falling + ramp. `width` = 0.5 produces a triangle wave. + If an array, causes wave shape to change over time, and must be the + same length as t. + + Returns + ------- + y : ndarray + Output array containing the sawtooth waveform. + + Examples + -------- + A 5 Hz waveform sampled at 500 Hz for 1 second: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(0, 1, 500) + >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t)) + + """ + t, w = asarray(t), asarray(width) + w = asarray(w + (t - t)) + t = asarray(t + (w - w)) + if t.dtype.char in ['fFdD']: + ytype = t.dtype.char + else: + ytype = 'd' + y = zeros(t.shape, ytype) + + # width must be between 0 and 1 inclusive + mask1 = (w > 1) | (w < 0) + place(y, mask1, nan) + + # take t modulo 2*pi + tmod = mod(t, 2 * pi) + + # on the interval 0 to width*2*pi function is + # tmod / (pi*w) - 1 + mask2 = (1 - mask1) & (tmod < w * 2 * pi) + tsub = extract(mask2, tmod) + wsub = extract(mask2, w) + place(y, mask2, tsub / (pi * wsub) - 1) + + # on the interval width*2*pi to 2*pi function is + # (pi*(w+1)-tmod) / (pi*(1-w)) + + mask3 = (1 - mask1) & (1 - mask2) + tsub = extract(mask3, tmod) + wsub = extract(mask3, w) + place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub))) + return y + + +def square(t, duty=0.5): + """ + Return a periodic square-wave waveform. 
+ + The square wave has a period ``2*pi``, has value +1 from 0 to + ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in + the interval [0,1]. + + Note that this is not band-limited. It produces an infinite number + of harmonics, which are aliased back and forth across the frequency + spectrum. + + Parameters + ---------- + t : array_like + The input time array. + duty : array_like, optional + Duty cycle. Default is 0.5 (50% duty cycle). + If an array, causes wave shape to change over time, and must be the + same length as t. + + Returns + ------- + y : ndarray + Output array containing the square waveform. + + Examples + -------- + A 5 Hz waveform sampled at 500 Hz for 1 second: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(0, 1, 500, endpoint=False) + >>> plt.plot(t, signal.square(2 * np.pi * 5 * t)) + >>> plt.ylim(-2, 2) + + A pulse-width modulated sine wave: + + >>> plt.figure() + >>> sig = np.sin(2 * np.pi * t) + >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2) + >>> plt.subplot(2, 1, 1) + >>> plt.plot(t, sig) + >>> plt.subplot(2, 1, 2) + >>> plt.plot(t, pwm) + >>> plt.ylim(-1.5, 1.5) + + """ + t, w = asarray(t), asarray(duty) + w = asarray(w + (t - t)) + t = asarray(t + (w - w)) + if t.dtype.char in ['fFdD']: + ytype = t.dtype.char + else: + ytype = 'd' + + y = zeros(t.shape, ytype) + + # width must be between 0 and 1 inclusive + mask1 = (w > 1) | (w < 0) + place(y, mask1, nan) + + # on the interval 0 to duty*2*pi function is 1 + tmod = mod(t, 2 * pi) + mask2 = (1 - mask1) & (tmod < w * 2 * pi) + place(y, mask2, 1) + + # on the interval duty*2*pi to 2*pi function is + # (pi*(w+1)-tmod) / (pi*(1-w)) + mask3 = (1 - mask1) & (1 - mask2) + place(y, mask3, -1) + return y + + +def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, + retenv=False): + """ + Return a Gaussian modulated sinusoid: + + ``exp(-a t^2) exp(1j*2*pi*fc*t).`` + + If `retquad` is True, then return the real and imaginary parts + (in-phase and quadrature). + If `retenv` is True, then return the envelope (unmodulated signal). + Otherwise, return the real part of the modulated sinusoid. + + Parameters + ---------- + t : ndarray or the string 'cutoff' + Input array. + fc : float, optional + Center frequency (e.g. Hz). Default is 1000. + bw : float, optional + Fractional bandwidth in frequency domain of pulse (e.g. Hz). + Default is 0.5. + bwr : float, optional + Reference level at which fractional bandwidth is calculated (dB). + Default is -6. + tpr : float, optional + If `t` is 'cutoff', then the function returns the cutoff + time for when the pulse amplitude falls below `tpr` (in dB). + Default is -60. + retquad : bool, optional + If True, return the quadrature (imaginary) as well as the real part + of the signal. Default is False. + retenv : bool, optional + If True, return the envelope of the signal. Default is False. + + Returns + ------- + yI : ndarray + Real part of signal. Always returned. + yQ : ndarray + Imaginary part of signal. Only returned if `retquad` is True. + yenv : ndarray + Envelope of signal. Only returned if `retenv` is True. 
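+
+    Notes
+    -----
+    As implemented below, the envelope parameter ``a`` in ``exp(-a*t**2)`` is
+    chosen so that, relative to the center frequency, the spectrum has dropped
+    to the `bwr` reference level at the edges of the fractional bandwidth
+    `bw`::
+
+        ref = 10 ** (bwr / 20)
+        a = -(pi * fc * bw) ** 2 / (4 * log(ref))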
+ + Examples + -------- + Plot real component, imaginary component, and envelope for a 5 Hz pulse, + sampled at 100 Hz for 2 seconds: + + >>> import numpy as np + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False) + >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True) + >>> plt.plot(t, i, t, q, t, e, '--') + + """ + if fc < 0: + raise ValueError(f"Center frequency (fc={fc:.2f}) must be >=0.") + if bw <= 0: + raise ValueError(f"Fractional bandwidth (bw={bw:.2f}) must be > 0.") + if bwr >= 0: + raise ValueError(f"Reference level for bandwidth (bwr={bwr:.2f}) " + "must be < 0 dB") + + # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f) + + ref = pow(10.0, bwr / 20.0) + # fdel = fc*bw/2: g(fdel) = ref --- solve this for a + # + # pi^2/a * fc^2 * bw^2 /4=-log(ref) + a = -(pi * fc * bw) ** 2 / (4.0 * log(ref)) + + if isinstance(t, str): + if t == 'cutoff': # compute cut_off point + # Solve exp(-a tc**2) = tref for tc + # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20) + if tpr >= 0: + raise ValueError("Reference level for time cutoff must " + "be < 0 dB") + tref = pow(10.0, tpr / 20.0) + return sqrt(-log(tref) / a) + else: + raise ValueError("If `t` is a string, it must be 'cutoff'") + + yenv = exp(-a * t * t) + yI = yenv * cos(2 * pi * fc * t) + yQ = yenv * sin(2 * pi * fc * t) + if not retquad and not retenv: + return yI + if not retquad and retenv: + return yI, yenv + if retquad and not retenv: + return yI, yQ + if retquad and retenv: + return yI, yQ, yenv + + +def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True, *, + complex=False): + r"""Frequency-swept cosine generator. + + In the following, 'Hz' should be interpreted as 'cycles per unit'; + there is no requirement here that the unit is one second. The + important distinction is that the units of rotation are cycles, not + radians. Likewise, `t` could be a measurement of space instead of time. + + Parameters + ---------- + t : array_like + Times at which to evaluate the waveform. + f0 : float + Frequency (e.g. Hz) at time t=0. + t1 : float + Time at which `f1` is specified. + f1 : float + Frequency (e.g. Hz) of the waveform at time `t1`. + method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional + Kind of frequency sweep. If not given, `linear` is assumed. See + Notes below for more details. + phi : float, optional + Phase offset, in degrees. Default is 0. + vertex_zero : bool, optional + This parameter is only used when `method` is 'quadratic'. + It determines whether the vertex of the parabola that is the graph + of the frequency is at t=0 or t=t1. + complex : bool, optional + This parameter creates a complex-valued analytic signal instead of a + real-valued signal. It allows the use of complex baseband (in communications + domain). Default is False. + + .. versionadded:: 1.15.0 + + Returns + ------- + y : ndarray + A numpy array containing the signal evaluated at `t` with the requested + time-varying frequency. More precisely, the function returns + ``exp(1j*phase + 1j*(pi/180)*phi) if complex else cos(phase + (pi/180)*phi)`` + where `phase` is the integral (from 0 to `t`) of ``2*pi*f(t)``. + The instantaneous frequency ``f(t)`` is defined below. + + See Also + -------- + sweep_poly + + Notes + ----- + There are four possible options for the parameter `method`, which have a (long) + standard form and some allowed abbreviations. 
The formulas for the instantaneous + frequency :math:`f(t)` of the generated signal are as follows: + + 1. Parameter `method` in ``('linear', 'lin', 'li')``: + + .. math:: + f(t) = f_0 + \beta\, t \quad\text{with}\quad + \beta = \frac{f_1 - f_0}{t_1} + + Frequency :math:`f(t)` varies linearly over time with a constant rate + :math:`\beta`. + + 2. Parameter `method` in ``('quadratic', 'quad', 'q')``: + + .. math:: + f(t) = + \begin{cases} + f_0 + \beta\, t^2 & \text{if vertex_zero is True,}\\ + f_1 + \beta\, (t_1 - t)^2 & \text{otherwise,} + \end{cases} + \quad\text{with}\quad + \beta = \frac{f_1 - f_0}{t_1^2} + + The graph of the frequency f(t) is a parabola through :math:`(0, f_0)` and + :math:`(t_1, f_1)`. By default, the vertex of the parabola is at + :math:`(0, f_0)`. If `vertex_zero` is ``False``, then the vertex is at + :math:`(t_1, f_1)`. + To use a more general quadratic function, or an arbitrary + polynomial, use the function `scipy.signal.sweep_poly`. + + 3. Parameter `method` in ``('logarithmic', 'log', 'lo')``: + + .. math:: + f(t) = f_0 \left(\frac{f_1}{f_0}\right)^{t/t_1} + + :math:`f_0` and :math:`f_1` must be nonzero and have the same sign. + This signal is also known as a geometric or exponential chirp. + + 4. Parameter `method` in ``('hyperbolic', 'hyp')``: + + .. math:: + f(t) = \frac{\alpha}{\beta\, t + \gamma} \quad\text{with}\quad + \alpha = f_0 f_1 t_1, \ \beta = f_0 - f_1, \ \gamma = f_1 t_1 + + :math:`f_0` and :math:`f_1` must be nonzero. + + + Examples + -------- + For the first example, a linear chirp ranging from 6 Hz to 1 Hz over 10 seconds is + plotted: + + >>> import numpy as np + >>> from matplotlib.pyplot import tight_layout + >>> from scipy.signal import chirp, square, ShortTimeFFT + >>> from scipy.signal.windows import gaussian + >>> import matplotlib.pyplot as plt + ... + >>> N, T = 1000, 0.01 # number of samples and sampling interval for 10 s signal + >>> t = np.arange(N) * T # timestamps + ... + >>> x_lin = chirp(t, f0=6, f1=1, t1=10, method='linear') + ... + >>> fg0, ax0 = plt.subplots() + >>> ax0.set_title(r"Linear Chirp from $f(0)=6\,$Hz to $f(10)=1\,$Hz") + >>> ax0.set(xlabel="Time $t$ in Seconds", ylabel=r"Amplitude $x_\text{lin}(t)$") + >>> ax0.plot(t, x_lin) + >>> plt.show() + + The following four plots each show the short-time Fourier transform of a chirp + ranging from 45 Hz to 5 Hz with different values for the parameter `method` + (and `vertex_zero`): + + >>> x_qu0 = chirp(t, f0=45, f1=5, t1=N*T, method='quadratic', vertex_zero=True) + >>> x_qu1 = chirp(t, f0=45, f1=5, t1=N*T, method='quadratic', vertex_zero=False) + >>> x_log = chirp(t, f0=45, f1=5, t1=N*T, method='logarithmic') + >>> x_hyp = chirp(t, f0=45, f1=5, t1=N*T, method='hyperbolic') + ... + >>> win = gaussian(50, std=12, sym=True) + >>> SFT = ShortTimeFFT(win, hop=2, fs=1/T, mfft=800, scale_to='magnitude') + >>> ts = ("'quadratic', vertex_zero=True", "'quadratic', vertex_zero=False", + ... "'logarithmic'", "'hyperbolic'") + >>> fg1, ax1s = plt.subplots(2, 2, sharex='all', sharey='all', + ... figsize=(6, 5), layout="constrained") + >>> for x_, ax_, t_ in zip([x_qu0, x_qu1, x_log, x_hyp], ax1s.ravel(), ts): + ... aSx = abs(SFT.stft(x_)) + ... im_ = ax_.imshow(aSx, origin='lower', aspect='auto', extent=SFT.extent(N), + ... cmap='plasma') + ... ax_.set_title(t_) + ... if t_ == "'hyperbolic'": + ... 
fg1.colorbar(im_, ax=ax1s, label='Magnitude $|S_z(t,f)|$') + >>> _ = fg1.supxlabel("Time $t$ in Seconds") # `_ =` is needed to pass doctests + >>> _ = fg1.supylabel("Frequency $f$ in Hertz") + >>> plt.show() + + Finally, the short-time Fourier transform of a complex-valued linear chirp + ranging from -30 Hz to 30 Hz is depicted: + + >>> z_lin = chirp(t, f0=-30, f1=30, t1=N*T, method="linear", complex=True) + >>> SFT.fft_mode = 'centered' # needed to work with complex signals + >>> aSz = abs(SFT.stft(z_lin)) + ... + >>> fg2, ax2 = plt.subplots() + >>> ax2.set_title(r"Linear Chirp from $-30\,$Hz to $30\,$Hz") + >>> ax2.set(xlabel="Time $t$ in Seconds", ylabel="Frequency $f$ in Hertz") + >>> im2 = ax2.imshow(aSz, origin='lower', aspect='auto', + ... extent=SFT.extent(N), cmap='viridis') + >>> fg2.colorbar(im2, label='Magnitude $|S_z(t,f)|$') + >>> plt.show() + + Note that using negative frequencies makes only sense with complex-valued signals. + Furthermore, the magnitude of the complex exponential function is one whereas the + magnitude of the real-valued cosine function is only 1/2. + """ + # 'phase' is computed in _chirp_phase, to make testing easier. + phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero) + np.deg2rad(phi) + return np.exp(1j*phase) if complex else np.cos(phase) + + +def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True): + """ + Calculate the phase used by `chirp` to generate its output. + + See `chirp` for a description of the arguments. + + """ + t = asarray(t) + f0 = float(f0) + t1 = float(t1) + f1 = float(f1) + if method in ['linear', 'lin', 'li']: + beta = (f1 - f0) / t1 + phase = 2 * pi * (f0 * t + 0.5 * beta * t * t) + + elif method in ['quadratic', 'quad', 'q']: + beta = (f1 - f0) / (t1 ** 2) + if vertex_zero: + phase = 2 * pi * (f0 * t + beta * t ** 3 / 3) + else: + phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3) + + elif method in ['logarithmic', 'log', 'lo']: + if f0 * f1 <= 0.0: + raise ValueError("For a logarithmic chirp, f0 and f1 must be " + "nonzero and have the same sign.") + if f0 == f1: + phase = 2 * pi * f0 * t + else: + beta = t1 / log(f1 / f0) + phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0) + + elif method in ['hyperbolic', 'hyp']: + if f0 == 0 or f1 == 0: + raise ValueError("For a hyperbolic chirp, f0 and f1 must be " + "nonzero.") + if f0 == f1: + # Degenerate case: constant frequency. + phase = 2 * pi * f0 * t + else: + # Singular point: the instantaneous frequency blows up + # when t == sing. + sing = -f1 * t1 / (f0 - f1) + phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing)) + + else: + raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', " + f"or 'hyperbolic', but a value of {method!r} was given.") + + return phase + + +def sweep_poly(t, poly, phi=0): + """ + Frequency-swept cosine generator, with a time-dependent frequency. + + This function generates a sinusoidal function whose instantaneous + frequency varies with time. The frequency at time `t` is given by + the polynomial `poly`. + + Parameters + ---------- + t : ndarray + Times at which to evaluate the waveform. + poly : 1-D array_like or instance of numpy.poly1d + The desired frequency expressed as a polynomial. If `poly` is + a list or ndarray of length n, then the elements of `poly` are + the coefficients of the polynomial, and the instantaneous + frequency is + + ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... 
+ poly[n-1]`` + + If `poly` is an instance of numpy.poly1d, then the + instantaneous frequency is + + ``f(t) = poly(t)`` + + phi : float, optional + Phase offset, in degrees, Default: 0. + + Returns + ------- + sweep_poly : ndarray + A numpy array containing the signal evaluated at `t` with the + requested time-varying frequency. More precisely, the function + returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral + (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above. + + See Also + -------- + chirp + + Notes + ----- + .. versionadded:: 0.8.0 + + If `poly` is a list or ndarray of length `n`, then the elements of + `poly` are the coefficients of the polynomial, and the instantaneous + frequency is: + + ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` + + If `poly` is an instance of `numpy.poly1d`, then the instantaneous + frequency is: + + ``f(t) = poly(t)`` + + Finally, the output `s` is: + + ``cos(phase + (pi/180)*phi)`` + + where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``, + ``f(t)`` as defined above. + + Examples + -------- + Compute the waveform with instantaneous frequency:: + + f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2 + + over the interval 0 <= t <= 10. + + >>> import numpy as np + >>> from scipy.signal import sweep_poly + >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0]) + >>> t = np.linspace(0, 10, 5001) + >>> w = sweep_poly(t, p) + + Plot it: + + >>> import matplotlib.pyplot as plt + >>> plt.subplot(2, 1, 1) + >>> plt.plot(t, w) + >>> plt.title("Sweep Poly\\nwith frequency " + + ... "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$") + >>> plt.subplot(2, 1, 2) + >>> plt.plot(t, p(t), 'r', label='f(t)') + >>> plt.legend() + >>> plt.xlabel('t') + >>> plt.tight_layout() + >>> plt.show() + + """ + # 'phase' is computed in _sweep_poly_phase, to make testing easier. + phase = _sweep_poly_phase(t, poly) + # Convert to radians. + phi *= pi / 180 + return cos(phase + phi) + + +def _sweep_poly_phase(t, poly): + """ + Calculate the phase used by sweep_poly to generate its output. + + See `sweep_poly` for a description of the arguments. + + """ + # polyint handles lists, ndarrays and instances of poly1d automatically. + intpoly = polyint(poly) + phase = 2 * pi * polyval(intpoly, t) + return phase + + +def unit_impulse(shape, idx=None, dtype=float): + r""" + Unit impulse signal (discrete delta function) or unit basis vector. + + Parameters + ---------- + shape : int or tuple of int + Number of samples in the output (1-D), or a tuple that represents the + shape of the output (N-D). + idx : None or int or tuple of int or 'mid', optional + Index at which the value is 1. If None, defaults to the 0th element. + If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in + all dimensions. If an int, the impulse will be at `idx` in all + dimensions. + dtype : data-type, optional + The desired data-type for the array, e.g., ``numpy.int8``. Default is + ``numpy.float64``. + + Returns + ------- + y : ndarray + Output array containing an impulse signal. + + Notes + ----- + In digital signal processing literature the unit impulse signal is often + represented by the Kronecker delta. [1]_ I.e., a signal :math:`u_k[n]`, + which is zero everywhere except being one at the :math:`k`-th sample, + can be expressed as + + .. math:: + + u_k[n] = \delta[n-k] \equiv \delta_{n,k}\ . + + Furthermore, the unit impulse is frequently interpreted as the discrete-time + version of the continuous-time Dirac distribution. [2]_ + + References + ---------- + .. 
[1] "Kronecker delta", *Wikipedia*, + https://en.wikipedia.org/wiki/Kronecker_delta#Digital_signal_processing + .. [2] "Dirac delta function" *Wikipedia*, + https://en.wikipedia.org/wiki/Dirac_delta_function#Relationship_to_the_Kronecker_delta + + .. versionadded:: 0.19.0 + + Examples + -------- + An impulse at the 0th element (:math:`\\delta[n]`): + + >>> from scipy import signal + >>> signal.unit_impulse(8) + array([ 1., 0., 0., 0., 0., 0., 0., 0.]) + + Impulse offset by 2 samples (:math:`\\delta[n-2]`): + + >>> signal.unit_impulse(7, 2) + array([ 0., 0., 1., 0., 0., 0., 0.]) + + 2-dimensional impulse, centered: + + >>> signal.unit_impulse((3, 3), 'mid') + array([[ 0., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 0.]]) + + Impulse at (2, 2), using broadcasting: + + >>> signal.unit_impulse((4, 4), 2) + array([[ 0., 0., 0., 0.], + [ 0., 0., 0., 0.], + [ 0., 0., 1., 0.], + [ 0., 0., 0., 0.]]) + + Plot the impulse response of a 4th-order Butterworth lowpass filter: + + >>> imp = signal.unit_impulse(100, 'mid') + >>> b, a = signal.butter(4, 0.2) + >>> response = signal.lfilter(b, a, imp) + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-50, 50), imp) + >>> plt.plot(np.arange(-50, 50), response) + >>> plt.margins(0.1, 0.1) + >>> plt.xlabel('Time [samples]') + >>> plt.ylabel('Amplitude') + >>> plt.grid(True) + >>> plt.show() + + """ + out = zeros(shape, dtype) + + shape = np.atleast_1d(shape) + + if idx is None: + idx = (0,) * len(shape) + elif idx == 'mid': + idx = tuple(shape // 2) + elif not hasattr(idx, "__iter__"): + idx = (idx,) * len(shape) + + out[idx] = 1 + return out diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_wavelets.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_wavelets.py new file mode 100644 index 0000000000000000000000000000000000000000..2b9f8fa32672e3f252f0f4ec4e387e0d474dc21e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_wavelets.py @@ -0,0 +1,29 @@ +import numpy as np +from scipy.signal import convolve + + +def _ricker(points, a): + A = 2 / (np.sqrt(3 * a) * (np.pi**0.25)) + wsq = a**2 + vec = np.arange(0, points) - (points - 1.0) / 2 + xsq = vec**2 + mod = (1 - xsq / wsq) + gauss = np.exp(-xsq / (2 * wsq)) + total = A * mod * gauss + return total + + +def _cwt(data, wavelet, widths, dtype=None, **kwargs): + # Determine output type + if dtype is None: + if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG': + dtype = np.complex128 + else: + dtype = np.float64 + + output = np.empty((len(widths), len(data)), dtype=dtype) + for ind, width in enumerate(widths): + N = np.min([10 * width, len(data)]) + wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1]) + output[ind] = convolve(data, wavelet_data, mode='same') + return output diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/bsplines.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/bsplines.py new file mode 100644 index 0000000000000000000000000000000000000000..0328d45c107bda78cbdbd374148237ca09ac411d --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/bsplines.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'spline_filter', 'gauss_spline', + 'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval', + 'cspline2d', 'sepfir2d' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="bsplines", + private_modules=["_spline_filters"], all=__all__, + attribute=name) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/filter_design.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..41dc230a7f24a7ac3209f821d8d0f9417130afbd --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/filter_design.py @@ -0,0 +1,28 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize', + 'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign', + 'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel', + 'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord', + 'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap', + 'BadCoefficients', 'freqs_zpk', 'freqz_zpk', + 'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay', + 'sosfreqz', 'freqz_sos', 'iirnotch', 'iirpeak', 'bilinear_zpk', + 'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk', + 'gammatone', 'iircomb', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="filter_design", + private_modules=["_filter_design"], all=__all__, + attribute=name) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..2214b82998bdefd2c6d6171cc952adf827269736 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'kaiser_beta', 'kaiser_atten', 'kaiserord', + 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="fir_filter_design", + private_modules=["_fir_filter_design"], all=__all__, + attribute=name) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/lti_conversion.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/lti_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..7080990afc9e23e51e8a45aaa146b64c58dda3cf --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/lti_conversion.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk', + 'cont2discrete', 'tf2zpk', 'zpk2tf', 'normalize' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="lti_conversion", + private_modules=["_lti_conversion"], all=__all__, + attribute=name) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/ltisys.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/ltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..5123068de559f124bf444c12ef9824c3d14de64f --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/ltisys.py @@ -0,0 +1,25 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace', + 'lsim', 'impulse', 'step', 'bode', + 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse', + 'dfreqresp', 'dbode', + 'tf2zpk', 'zpk2tf', 'normalize', 'freqs', + 'freqz', 'freqs_zpk', 'freqz_zpk', 'tf2ss', 'abcd_normalize', + 'ss2tf', 'zpk2ss', 'ss2zpk', 'cont2discrete', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="ltisys", + private_modules=["_ltisys"], all=__all__, + attribute=name) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/signaltools.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/signaltools.py new file mode 100644 index 0000000000000000000000000000000000000000..85d426f5fb2605c639fc6dbd1b4d0284a3f11e1b --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/signaltools.py @@ -0,0 +1,27 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'correlate', 'correlation_lags', 'correlate2d', + 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve', + 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', + 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2', + 'unique_roots', 'invres', 'invresz', 'residue', + 'residuez', 'resample', 'resample_poly', 'detrend', + 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method', + 'filtfilt', 'decimate', 'vectorstrength', + 'dlti', 'upfirdn', 'get_window', 'cheby1', 'firwin' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="signaltools", + private_modules=["_signaltools"], all=__all__, + attribute=name) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/spectral.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..299ebed781b00a1f1f35e96c54f4c20d9bd9d0fc --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/spectral.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'periodogram', 'welch', 'lombscargle', 'csd', 'coherence', + 'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA', + 'get_window', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="spectral", + private_modules=["_spectral_py"], all=__all__, + attribute=name) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/spline.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/spline.py new file mode 100644 index 0000000000000000000000000000000000000000..7afd0d0a14beecd5bc4522050eaf3b195f1a3601 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/spline.py @@ -0,0 +1,25 @@ +# This file is not meant for public use and will be removed in the future +# versions of SciPy. Use the `scipy.signal` namespace for importing the +# functions included below. + +import warnings + +from . import _spline + +__all__ = ['sepfir2d'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + if name not in __all__: + raise AttributeError( + f"scipy.signal.spline is deprecated and has no attribute {name}. " + "Try looking in scipy.signal instead.") + + warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " + "the `scipy.signal.spline` namespace is deprecated.", + category=DeprecationWarning, stacklevel=2) + return getattr(_spline, name) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/_scipy_spectral_test_shim.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/_scipy_spectral_test_shim.py new file mode 100644 index 0000000000000000000000000000000000000000..c23f310bcae4fa85558f7f07cddb25874a0ec7d1 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/_scipy_spectral_test_shim.py @@ -0,0 +1,488 @@ +"""Helpers to utilize existing stft / istft tests for testing `ShortTimeFFT`. + +This module provides the functions stft_compare() and istft_compare(), which, +compares the output between the existing (i)stft() and the shortTimeFFT based +_(i)stft_wrapper() implementations in this module. + +For testing add the following imports to the file ``tests/test_spectral.py``:: + + from ._scipy_spectral_test_shim import stft_compare as stft + from ._scipy_spectral_test_shim import istft_compare as istft + +and remove the existing imports of stft and istft. + +The idea of these wrappers is not to provide a backward-compatible interface +but to demonstrate that the ShortTimeFFT implementation is at least as capable +as the existing one and delivers comparable results. Furthermore, the +wrappers highlight the different philosophies of the implementations, +especially in the border handling. 
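+
+Schematically, each ``*_compare()`` helper evaluates both code paths and
+asserts that their results agree, e.g. (simplified)::
+
+    f, t, Zxx = stft(**kw)                  # existing implementation
+    f1, t1, Zxx1 = _stft_wrapper(**kw)      # ShortTimeFFT-based wrapper
+    assert_allclose(Zxx1, Zxx, atol=atol)   # tolerances: see the helpers
+
+so that the existing unit tests exercise both implementations at once.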
+""" +import platform +from typing import cast, Literal + +import numpy as np +from numpy.testing import assert_allclose + +from scipy.signal import ShortTimeFFT +from scipy.signal import csd, get_window, stft, istft +from scipy.signal._arraytools import const_ext, even_ext, odd_ext, zero_ext +from scipy.signal._short_time_fft import FFT_MODE_TYPE +from scipy.signal._spectral_py import _spectral_helper, _triage_segments, \ + _median_bias + + +def _stft_wrapper(x, fs=1.0, window='hann', nperseg=256, noverlap=None, + nfft=None, detrend=False, return_onesided=True, + boundary='zeros', padded=True, axis=-1, scaling='spectrum'): + """Wrapper for the SciPy `stft()` function based on `ShortTimeFFT` for + unit testing. + + Handling the boundary and padding is where `ShortTimeFFT` and `stft()` + differ in behavior. Parts of `_spectral_helper()` were copied to mimic + the` stft()` behavior. + + This function is meant to be solely used by `stft_compare()`. + """ + if scaling not in ('psd', 'spectrum'): # same errors as in original stft: + raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") + + # The following lines are taken from the original _spectral_helper(): + boundary_funcs = {'even': even_ext, + 'odd': odd_ext, + 'constant': const_ext, + 'zeros': zero_ext, + None: None} + + if boundary not in boundary_funcs: + raise ValueError(f"Unknown boundary option '{boundary}', must be one" + + f" of: {list(boundary_funcs.keys())}") + if x.size == 0: + return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape) + + if nperseg is not None: # if specified by user + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + # parse window; if array like, then set nperseg = win.shape + win, nperseg = _triage_segments(window, nperseg, + input_length=x.shape[axis]) + + if nfft is None: + nfft = nperseg + elif nfft < nperseg: + raise ValueError('nfft must be greater than or equal to nperseg.') + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg//2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + nstep = nperseg - noverlap + n = x.shape[axis] + + # Padding occurs after boundary extension, so that the extended signal ends + # in zeros, instead of introducing an impulse at the end. + # I.e. if x = [..., 3, 2] + # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0] + # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3] + + if boundary is not None: + ext_func = boundary_funcs[boundary] + # Extend by nperseg//2 in front and back: + x = ext_func(x, nperseg//2, axis=axis) + + if padded: + # Pad to integer number of windowed segments + # I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg + x = np.moveaxis(x, axis, -1) + + # This is an edge case where shortTimeFFT returns one more time slice + # than the Scipy stft() shorten to remove last time slice: + if n % 2 == 1 and nperseg % 2 == 1 and noverlap % 2 == 1: + x = x[..., :axis - 1] + + nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg + zeros_shape = list(x.shape[:-1]) + [nadd] + x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1) + x = np.moveaxis(x, -1, axis) + + # ... end original _spectral_helper() code. 
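+    # Translate the legacy `scaling` argument into ShortTimeFFT's `scale_to`
+    # vocabulary ('spectrum' -> 'magnitude', 'psd' -> 'psd') before
+    # constructing the ShortTimeFFT instance below.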
+ scale_to = {'spectrum': 'magnitude', 'psd': 'psd'}[scaling] + + if np.iscomplexobj(x) and return_onesided: + return_onesided = False + # using cast() to make mypy happy: + fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided else 'twosided') + + ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft, + scale_to=scale_to, phase_shift=None) + + k_off = nperseg // 2 + p0 = 0 # ST.lower_border_end[1] + 1 + nn = x.shape[axis] if padded else n+k_off+1 + p1 = ST.upper_border_begin(nn)[1] # ST.p_max(n) + 1 + + # This is bad hack to pass the test test_roundtrip_boundary_extension(): + if padded is True and nperseg - noverlap == 1: + p1 -= nperseg // 2 - 1 # the reasoning behind this is not clear to me + + detr = None if detrend is False else detrend + Sxx = ST.stft_detrend(x, detr, p0, p1, k_offset=k_off, axis=axis) + t = ST.t(nn, 0, p1 - p0, k_offset=0 if boundary is not None else k_off) + if x.dtype in (np.float32, np.complex64): + Sxx = Sxx.astype(np.complex64) + + # workaround for test_average_all_segments() - seems to be buggy behavior: + if boundary is None and padded is False: + t, Sxx = t[1:-1], Sxx[..., :-2] + t -= k_off / fs + + return ST.f, t, Sxx + + +def _istft_wrapper(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, input_onesided=True, boundary=True, time_axis=-1, + freq_axis=-2, scaling='spectrum') -> \ + tuple[np.ndarray, np.ndarray, tuple[int, int]]: + """Wrapper for the SciPy `istft()` function based on `ShortTimeFFT` for + unit testing. + + Note that only option handling is implemented as far as to handle the unit + tests. E.g., the case ``nperseg=None`` is not handled. + + This function is meant to be solely used by `istft_compare()`. + """ + # *** Lines are taken from _spectral_py.istft() ***: + if Zxx.ndim < 2: + raise ValueError('Input stft must be at least 2d!') + + if freq_axis == time_axis: + raise ValueError('Must specify differing time and frequency axes!') + + nseg = Zxx.shape[time_axis] + + if input_onesided: + # Assume even segment length + n_default = 2*(Zxx.shape[freq_axis] - 1) + else: + n_default = Zxx.shape[freq_axis] + + # Check windowing parameters + if nperseg is None: + nperseg = n_default + else: + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if nfft is None: + if input_onesided and (nperseg == n_default + 1): + # Odd nperseg, no FFT padding + nfft = nperseg + else: + nfft = n_default + elif nfft < nperseg: + raise ValueError('nfft must be greater than or equal to nperseg.') + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg//2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + nstep = nperseg - noverlap + + # Get window as array + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError(f'window must have length of {nperseg}') + + outputlength = nperseg + (nseg-1)*nstep + # *** End block of: Taken from _spectral_py.istft() *** + + # Using cast() to make mypy happy: + fft_mode = cast(FFT_MODE_TYPE, 'onesided' if input_onesided else 'twosided') + scale_to = cast(Literal['magnitude', 'psd'], + {'spectrum': 'magnitude', 'psd': 'psd'}[scaling]) + + ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft, + scale_to=scale_to, phase_shift=None) + + if boundary: + j = nperseg if nperseg % 2 == 
0 else nperseg - 1 + k0 = ST.k_min + nperseg // 2 + k1 = outputlength - j + k0 + else: + raise NotImplementedError("boundary=False does not make sense with" + + "ShortTimeFFT.istft()!") + + x = ST.istft(Zxx, k0=k0, k1=k1, f_axis=freq_axis, t_axis=time_axis) + t = np.arange(k1 - k0) * ST.T + k_hi = ST.upper_border_begin(k1 - k0)[0] + # using cast() to make mypy happy: + return t, x, (ST.lower_border_end[0], k_hi) + + +def _csd_wrapper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1, average='mean'): + """Wrapper for the `csd()` function based on `ShortTimeFFT` for + unit testing. + """ + freqs, _, Pxy = _csd_test_shim(x, y, fs, window, nperseg, noverlap, nfft, + detrend, return_onesided, scaling, axis) + + # The following code is taken from csd(): + if len(Pxy.shape) >= 2 and Pxy.size > 0: + if Pxy.shape[-1] > 1: + if average == 'median': + # np.median must be passed real arrays for the desired result + bias = _median_bias(Pxy.shape[-1]) + if np.iscomplexobj(Pxy): + Pxy = (np.median(np.real(Pxy), axis=-1) + + 1j * np.median(np.imag(Pxy), axis=-1)) + else: + Pxy = np.median(Pxy, axis=-1) + Pxy /= bias + elif average == 'mean': + Pxy = Pxy.mean(axis=-1) + else: + raise ValueError(f'average must be "median" or "mean", got {average}') + else: + Pxy = np.reshape(Pxy, Pxy.shape[:-1]) + + return freqs, Pxy + + +def _csd_test_shim(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1): + """Compare output of _spectral_helper() and ShortTimeFFT, more + precisely _spect_helper_csd() for used in csd_wrapper(). + + The motivation of this function is to test if the ShortTimeFFT-based + wrapper `_spect_helper_csd()` returns the same values as `_spectral_helper`. + This function should only be usd by csd() in (unit) testing. + """ + freqs, t, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft, + detrend, return_onesided, scaling, axis, + mode='psd') + freqs1, Pxy1 = _spect_helper_csd(x, y, fs, window, nperseg, noverlap, nfft, + detrend, return_onesided, scaling, axis) + + np.testing.assert_allclose(freqs1, freqs) + amax_Pxy = max(np.abs(Pxy).max(), 1) if Pxy.size else 1 + atol = np.finfo(Pxy.dtype).resolution * amax_Pxy # needed for large Pxy + # for c_ in range(Pxy.shape[-1]): + # np.testing.assert_allclose(Pxy1[:, c_], Pxy[:, c_], atol=atol) + np.testing.assert_allclose(Pxy1, Pxy, atol=atol) + return freqs, t, Pxy + + +def _spect_helper_csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1): + """Wrapper for replacing _spectral_helper() by using the ShortTimeFFT + for use by csd(). + + This function should be only used by _csd_test_shim() and is only useful + for testing the ShortTimeFFT implementation. 
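+
+    In particular, the spectrogram call below uses ``p0=0``,
+    ``p1=(n - noverlap)//hop`` and ``k_offset=nperseg//2``, chosen to line up
+    with the segmentation used by `_spectral_helper()`.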
+ """ + + # The following lines are taken from the original _spectral_helper(): + same_data = y is x + axis = int(axis) + + # Ensure we have np.arrays, get outdtype + x = np.asarray(x) + if not same_data: + y = np.asarray(y) + # outdtype = np.result_type(x, y, np.complex64) + # else: + # outdtype = np.result_type(x, np.complex64) + + if not same_data: + # Check if we can broadcast the outer axes together + xouter = list(x.shape) + youter = list(y.shape) + xouter.pop(axis) + youter.pop(axis) + try: + outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape + except ValueError as e: + raise ValueError('x and y cannot be broadcast together.') from e + + if same_data: + if x.size == 0: + return np.empty(x.shape), np.empty(x.shape) + else: + if x.size == 0 or y.size == 0: + outshape = outershape + (min([x.shape[axis], y.shape[axis]]),) + emptyout = np.moveaxis(np.empty(outshape), -1, axis) + return emptyout, emptyout + + if nperseg is not None: # if specified by user + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + # parse window; if array like, then set nperseg = win.shape + n = x.shape[axis] if same_data else max(x.shape[axis], y.shape[axis]) + win, nperseg = _triage_segments(window, nperseg, input_length=n) + + if nfft is None: + nfft = nperseg + elif nfft < nperseg: + raise ValueError('nfft must be greater than or equal to nperseg.') + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg // 2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + nstep = nperseg - noverlap + + if np.iscomplexobj(x) and return_onesided: + return_onesided = False + + # using cast() to make mypy happy: + fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided + else 'twosided') + scale = {'spectrum': 'magnitude', 'density': 'psd'}[scaling] + SFT = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft, + scale_to=scale, phase_shift=None) + + # _spectral_helper() calculates X.conj()*Y instead of X*Y.conj(): + Pxy = SFT.spectrogram(y, x, detr=None if detrend is False else detrend, + p0=0, p1=(n-noverlap)//SFT.hop, k_offset=nperseg//2, + axis=axis).conj() + # Note: + # 'onesided2X' scaling of ShortTimeFFT conflicts with the + # scaling='spectrum' parameter, since it doubles the squared magnitude, + # which in the view of the ShortTimeFFT implementation does not make sense. + # Hence, the doubling of the square is implemented here: + if return_onesided: + f_axis = Pxy.ndim - 1 + axis if axis < 0 else axis + Pxy = np.moveaxis(Pxy, f_axis, -1) + Pxy[..., 1:-1 if SFT.mfft % 2 == 0 else None] *= 2 + Pxy = np.moveaxis(Pxy, -1, f_axis) + + return SFT.f, Pxy + + +def stft_compare(x, fs=1.0, window='hann', nperseg=256, noverlap=None, + nfft=None, detrend=False, return_onesided=True, + boundary='zeros', padded=True, axis=-1, scaling='spectrum'): + """Assert that the results from the existing `stft()` and `_stft_wrapper()` + are close to each other. + + For comparing the STFT values an absolute tolerance of the floating point + resolution was added to circumvent problems with the following tests: + * For float32 the tolerances are much higher in + TestSTFT.test_roundtrip_float32()). + * The TestSTFT.test_roundtrip_scaling() has a high relative deviation. + Interestingly this did not appear in Scipy 1.9.1 but only in the current + development version. 
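+
+    Concretely, the absolute tolerance used below is
+    ``np.finfo(Zxx.dtype).resolution * 2``.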
+ """ + kw = dict(x=x, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, + nfft=nfft, detrend=detrend, return_onesided=return_onesided, + boundary=boundary, padded=padded, axis=axis, scaling=scaling) + f, t, Zxx = stft(**kw) + f_wrapper, t_wrapper, Zxx_wrapper = _stft_wrapper(**kw) + + e_msg_part = " of `stft_wrapper()` differ from `stft()`." + assert_allclose(f_wrapper, f, err_msg=f"Frequencies {e_msg_part}") + assert_allclose(t_wrapper, t, err_msg=f"Time slices {e_msg_part}") + + # Adapted tolerances to account for: + atol = np.finfo(Zxx.dtype).resolution * 2 + assert_allclose(Zxx_wrapper, Zxx, atol=atol, + err_msg=f"STFT values {e_msg_part}") + return f, t, Zxx + + +def istft_compare(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, input_onesided=True, boundary=True, time_axis=-1, + freq_axis=-2, scaling='spectrum'): + """Assert that the results from the existing `istft()` and + `_istft_wrapper()` are close to each other. + + Quirks: + * If ``boundary=False`` the comparison is skipped, since it does not + make sense with ShortTimeFFT.istft(). Only used in test + TestSTFT.test_roundtrip_boundary_extension(). + * If ShortTimeFFT.istft() decides the STFT is not invertible, the + comparison is skipped, since istft() only emits a warning and does not + return a correct result. Only used in + ShortTimeFFT.test_roundtrip_not_nola(). + * For comparing the signals an absolute tolerance of the floating point + resolution was added to account for the low accuracy of float32 (Occurs + only in TestSTFT.test_roundtrip_float32()). + """ + kw = dict(Zxx=Zxx, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, input_onesided=input_onesided, + boundary=boundary, time_axis=time_axis, freq_axis=freq_axis, + scaling=scaling) + + t, x = istft(**kw) + if not boundary: # skip test_roundtrip_boundary_extension(): + return t, x # _istft_wrapper does() not implement this case + try: # if inversion fails, istft() only emits a warning: + t_wrapper, x_wrapper, (k_lo, k_hi) = _istft_wrapper(**kw) + except ValueError as v: # Do nothing if inversion fails: + if v.args[0] == "Short-time Fourier Transform not invertible!": + return t, x + raise v + + e_msg_part = " of `istft_wrapper()` differ from `istft()`" + assert_allclose(t, t_wrapper, err_msg=f"Sample times {e_msg_part}") + + # Adapted tolerances to account for resolution loss: + atol = np.finfo(x.dtype).resolution*2 # instead of default atol = 0 + rtol = 1e-7 # default for np.allclose() + + # Relax atol on 32-Bit platforms a bit to pass CI tests. + # - Not clear why there are discrepancies (in the FFT maybe?) + # - Not sure what changed on 'i686' since earlier on those test passed + if x.dtype == np.float32 and platform.machine() == 'i686': + # float32 gets only used by TestSTFT.test_roundtrip_float32() so + # we are using the tolerances from there to circumvent CI problems + atol, rtol = 1e-4, 1e-5 + elif platform.machine() in ('aarch64', 'i386', 'i686'): + atol = max(atol, 1e-12) # 2e-15 seems too tight for 32-Bit platforms + + assert_allclose(x_wrapper[k_lo:k_hi], x[k_lo:k_hi], atol=atol, rtol=rtol, + err_msg=f"Signal values {e_msg_part}") + return t, x + + +def csd_compare(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1, average='mean'): + """Assert that the results from the existing `csd()` and `_csd_wrapper()` + are close to each other. 
""" + kw = dict(x=x, y=y, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, + return_onesided=return_onesided, scaling=scaling, axis=axis, + average=average) + freqs0, Pxy0 = csd(**kw) + freqs1, Pxy1 = _csd_wrapper(**kw) + + assert_allclose(freqs1, freqs0) + assert_allclose(Pxy1, Pxy0) + assert_allclose(freqs1, freqs0) + return freqs0, Pxy0 diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/mpsig.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/mpsig.py new file mode 100644 index 0000000000000000000000000000000000000000..d129de74e5df00c22bc0b82c7d3f7b52483941f9 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/mpsig.py @@ -0,0 +1,122 @@ +""" +Some signal functions implemented using mpmath. +""" + +try: + import mpmath +except ImportError: + mpmath = None + + +def _prod(seq): + """Returns the product of the elements in the sequence `seq`.""" + p = 1 + for elem in seq: + p *= elem + return p + + +def _relative_degree(z, p): + """ + Return relative degree of transfer function from zeros and poles. + + This is simply len(p) - len(z), which must be nonnegative. + A ValueError is raised if len(p) < len(z). + """ + degree = len(p) - len(z) + if degree < 0: + raise ValueError("Improper transfer function. " + "Must have at least as many poles as zeros.") + return degree + + +def _zpkbilinear(z, p, k, fs): + """Bilinear transformation to convert a filter from analog to digital.""" + + degree = _relative_degree(z, p) + + fs2 = 2*fs + + # Bilinear transform the poles and zeros + z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z] + p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p] + + # Any zeros that were at infinity get moved to the Nyquist frequency + z_z.extend([-1] * degree) + + # Compensate for gain change + numer = _prod(fs2 - z1 for z1 in z) + denom = _prod(fs2 - p1 for p1 in p) + k_z = k * numer / denom + + return z_z, p_z, k_z.real + + +def _zpklp2lp(z, p, k, wo=1): + """Transform a lowpass filter to a different cutoff frequency.""" + + degree = _relative_degree(z, p) + + # Scale all points radially from origin to shift cutoff frequency + z_lp = [wo * z1 for z1 in z] + p_lp = [wo * p1 for p1 in p] + + # Each shifted pole decreases gain by wo, each shifted zero increases it. + # Cancel out the net change to keep overall gain the same + k_lp = k * wo**degree + + return z_lp, p_lp, k_lp + + +def _butter_analog_poles(n): + """ + Poles of an analog Butterworth lowpass filter. + + This is the same calculation as scipy.signal.buttap(n) or + scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used, + and only the poles are returned. + """ + poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)] + return poles + + +def butter_lp(n, Wn): + """ + Lowpass Butterworth digital filter design. + + This computes the same result as scipy.signal.butter(n, Wn, output='zpk'), + but it uses mpmath, and the results are returned in lists instead of NumPy + arrays. + """ + zeros = [] + poles = _butter_analog_poles(n) + k = 1 + fs = 2 + warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs) + z, p, k = _zpklp2lp(zeros, poles, k, wo=warped) + z, p, k = _zpkbilinear(z, p, k, fs=fs) + return z, p, k + + +def zpkfreqz(z, p, k, worN=None): + """ + Frequency response of a filter in zpk format, using mpmath. + + This is the same calculation as scipy.signal.freqz, but the input is in + zpk format, the calculation is performed using mpath, and the results are + returned in lists instead of NumPy arrays. 
+ """ + if worN is None or isinstance(worN, int): + N = worN or 512 + ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)] + else: + ws = worN + + h = [] + for wk in ws: + zm1 = mpmath.exp(1j * wk) + numer = _prod([zm1 - t for t in z]) + denom = _prod([zm1 - t for t in p]) + hk = k * numer / denom + h.append(hk) + return ws, h diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_array_tools.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_array_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..4bda9716e0bc4b6ad3ed0c3954147043b74c421a --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_array_tools.py @@ -0,0 +1,111 @@ +import numpy as np + +from scipy._lib._array_api import xp_assert_equal +from pytest import raises as assert_raises + +from scipy.signal._arraytools import (axis_slice, axis_reverse, + odd_ext, even_ext, const_ext, zero_ext) + + +class TestArrayTools: + + def test_axis_slice(self): + a = np.arange(12).reshape(3, 4) + + s = axis_slice(a, start=0, stop=1, axis=0) + xp_assert_equal(s, a[0:1, :]) + + s = axis_slice(a, start=-1, axis=0) + xp_assert_equal(s, a[-1:, :]) + + s = axis_slice(a, start=0, stop=1, axis=1) + xp_assert_equal(s, a[:, 0:1]) + + s = axis_slice(a, start=-1, axis=1) + xp_assert_equal(s, a[:, -1:]) + + s = axis_slice(a, start=0, step=2, axis=0) + xp_assert_equal(s, a[::2, :]) + + s = axis_slice(a, start=0, step=2, axis=1) + xp_assert_equal(s, a[:, ::2]) + + def test_axis_reverse(self): + a = np.arange(12).reshape(3, 4) + + r = axis_reverse(a, axis=0) + xp_assert_equal(r, a[::-1, :]) + + r = axis_reverse(a, axis=1) + xp_assert_equal(r, a[:, ::-1]) + + def test_odd_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + odd = odd_ext(a, 2, axis=1) + expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], + [11, 10, 9, 8, 7, 6, 5, 4, 3]]) + xp_assert_equal(odd, expected) + + odd = odd_ext(a, 1, axis=0) + expected = np.array([[-7, -4, -1, 2, 5], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [17, 14, 11, 8, 5]]) + xp_assert_equal(odd, expected) + + assert_raises(ValueError, odd_ext, a, 2, axis=0) + assert_raises(ValueError, odd_ext, a, 5, axis=1) + + def test_even_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + even = even_ext(a, 2, axis=1) + expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3], + [7, 8, 9, 8, 7, 6, 5, 6, 7]]) + xp_assert_equal(even, expected) + + even = even_ext(a, 1, axis=0) + expected = np.array([[9, 8, 7, 6, 5], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [1, 2, 3, 4, 5]]) + xp_assert_equal(even, expected) + + assert_raises(ValueError, even_ext, a, 2, axis=0) + assert_raises(ValueError, even_ext, a, 5, axis=1) + + def test_const_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + const = const_ext(a, 2, axis=1) + expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5], + [9, 9, 9, 8, 7, 6, 5, 5, 5]]) + xp_assert_equal(const, expected) + + const = const_ext(a, 1, axis=0) + expected = np.array([[1, 2, 3, 4, 5], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [9, 8, 7, 6, 5]]) + xp_assert_equal(const, expected) + + def test_zero_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + zero = zero_ext(a, 2, axis=1) + expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0], + [0, 0, 9, 8, 7, 6, 5, 0, 0]]) + xp_assert_equal(zero, expected) + + zero = zero_ext(a, 1, axis=0) + expected = np.array([[0, 0, 0, 0, 0], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [0, 0, 0, 0, 0]]) + xp_assert_equal(zero, expected) + diff --git 
a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_bsplines.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_bsplines.py new file mode 100644 index 0000000000000000000000000000000000000000..9c7baf2d8d9f2f9a84452965c35b5262ca20105d --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_bsplines.py @@ -0,0 +1,330 @@ +# pylint: disable=missing-docstring +import numpy as np + +from scipy._lib._array_api import ( + assert_almost_equal, xp_assert_close, xp_assert_equal +) +import pytest +from pytest import raises + +import scipy.signal._spline_filters as bsp +from scipy import signal + + +class TestBSplines: + """Test behaviors of B-splines. Some of the values tested against were + returned as of SciPy 1.1.0 and are included for regression testing + purposes. Others (at integer points) are compared to theoretical + expressions (cf. Unser, Aldroubi, Eden, IEEE TSP 1993, Table 1).""" + + def test_spline_filter(self): + rng = np.random.RandomState(12457) + # Test the type-error branch + raises(TypeError, bsp.spline_filter, np.asarray([0]), 0) + # Test the real branch + data_array_real = rng.rand(12, 12) + # make the magnitude exceed 1, and make some negative + data_array_real = 10*(1-2*data_array_real) + result_array_real = np.asarray( + [[-.463312621, 8.33391222, .697290949, 5.28390836, + 5.92066474, 6.59452137, 9.84406950, -8.78324188, + 7.20675750, -8.17222994, -4.38633345, 9.89917069], + [2.67755154, 6.24192170, -3.15730578, 9.87658581, + -9.96930425, 3.17194115, -4.50919947, 5.75423446, + 9.65979824, -8.29066885, .971416087, -2.38331897], + [-7.08868346, 4.89887705, -1.37062289, 7.70705838, + 2.51526461, 3.65885497, 5.16786604, -8.77715342e-03, + 4.10533325, 9.04761993, -.577960351, 9.86382519], + [-4.71444301, -1.68038985, 2.84695116, 1.14315938, + -3.17127091, 1.91830461, 7.13779687, -5.35737482, + -9.66586425, -9.87717456, 9.93160672, 4.71948144], + [9.49551194, -1.92958436, 6.25427993, -9.05582911, + 3.97562282, 7.68232426, -1.04514824, -5.86021443, + -8.43007451, 5.47528997, 2.06330736, -8.65968112], + [-8.91720100, 8.87065356, 3.76879937, 2.56222894, + -.828387146, 8.72288903, 6.42474741, -6.84576083, + 9.94724115, 6.90665380, -6.61084494, -9.44907391], + [9.25196790, -.774032030, 7.05371046, -2.73505725, + 2.53953305, -1.82889155, 2.95454824, -1.66362046, + 5.72478916, -3.10287679, 1.54017123, -7.87759020], + [-3.98464539, -2.44316992, -1.12708657, 1.01725672, + -8.89294671, -5.42145629, -6.16370321, 2.91775492, + 9.64132208, .702499998, -2.02622392, 1.56308431], + [-2.22050773, 7.89951554, 5.98970713, -7.35861835, + 5.45459283, -7.76427957, 3.67280490, -4.05521315, + 4.51967507, -3.22738749, -3.65080177, 3.05630155], + [-6.21240584, -.296796126, -8.34800163, 9.21564563, + -3.61958784, -4.77120006, -3.99454057, 1.05021988e-03, + -6.95982829, 6.04380797, 8.43181250, -2.71653339], + [1.19638037, 6.99718842e-02, 6.72020394, -2.13963198, + 3.75309875, -5.70076744, 5.92143551, -7.22150575, + -3.77114594, -1.11903194, -5.39151466, 3.06620093], + [9.86326886, 1.05134482, -7.75950607, -3.64429655, + 7.81848957, -9.02270373, 3.73399754, -4.71962549, + -7.71144306, 3.78263161, 6.46034818, -4.43444731]]) + xp_assert_close(bsp.spline_filter(data_array_real, 0), + result_array_real) + + def test_spline_filter_complex(self): + rng = np.random.RandomState(12457) + data_array_complex = rng.rand(7, 7) + rng.rand(7, 7)*1j + # make the magnitude exceed 1, and make some negative + data_array_complex = 10*(1+1j-2*data_array_complex) + 
result_array_complex = np.asarray( + [[-4.61489230e-01-1.92994022j, 8.33332443+6.25519943j, + 6.96300745e-01-9.05576038j, 5.28294849+3.97541356j, + 5.92165565+7.68240595j, 6.59493160-1.04542804j, + 9.84503460-5.85946894j], + [-8.78262329-8.4295969j, 7.20675516+5.47528982j, + -8.17223072+2.06330729j, -4.38633347-8.65968037j, + 9.89916801-8.91720295j, 2.67755103+8.8706522j, + 6.24192142+3.76879835j], + [-3.15627527+2.56303072j, 9.87658501-0.82838702j, + -9.96930313+8.72288895j, 3.17193985+6.42474651j, + -4.50919819-6.84576082j, 5.75423431+9.94723988j, + 9.65979767+6.90665293j], + [-8.28993416-6.61064005j, 9.71416473e-01-9.44907284j, + -2.38331890+9.25196648j, -7.08868170-0.77403212j, + 4.89887714+7.05371094j, -1.37062311-2.73505688j, + 7.70705748+2.5395329j], + [2.51528406-1.82964492j, 3.65885472+2.95454836j, + 5.16786575-1.66362023j, -8.77737999e-03+5.72478867j, + 4.10533333-3.10287571j, 9.04761887+1.54017115j, + -5.77960968e-01-7.87758923j], + [9.86398506-3.98528528j, -4.71444130-2.44316983j, + -1.68038976-1.12708664j, 2.84695053+1.01725709j, + 1.14315915-8.89294529j, -3.17127085-5.42145538j, + 1.91830420-6.16370344j], + [7.13875294+2.91851187j, -5.35737514+9.64132309j, + -9.66586399+0.70250005j, -9.87717438-2.0262239j, + 9.93160629+1.5630846j, 4.71948051-2.22050714j, + 9.49550819+7.8995142j]]) + # FIXME: for complex types, the computations are done in + # single precision (reason unclear). When this is changed, + # this test needs updating. + xp_assert_close(bsp.spline_filter(data_array_complex, 0), + result_array_complex, rtol=1e-6) + + def test_gauss_spline(self): + np.random.seed(12459) + assert_almost_equal(bsp.gauss_spline(0, 0), 1.381976597885342) + xp_assert_close(bsp.gauss_spline(np.asarray([1.]), 1), + np.asarray([0.04865217]), atol=1e-9 + ) + + def test_gauss_spline_list(self): + # regression test for gh-12152 (accept array_like) + knots = [-1.0, 0.0, -1.0] + assert_almost_equal(bsp.gauss_spline(knots, 3), + np.asarray([0.15418033, 0.6909883, 0.15418033]) + ) + + def test_cspline1d(self): + np.random.seed(12462) + xp_assert_equal(bsp.cspline1d(np.asarray([0])), [0.]) + c1d = np.asarray([1.21037185, 1.86293902, 2.98834059, 4.11660378, + 4.78893826]) + # test lamda != 0 + xp_assert_close(bsp.cspline1d(np.asarray([1., 2, 3, 4, 5]), 1), c1d) + c1d0 = np.asarray([0.78683946, 2.05333735, 2.99981113, 3.94741812, + 5.21051638]) + xp_assert_close(bsp.cspline1d(np.asarray([1., 2, 3, 4, 5])), c1d0) + + def test_qspline1d(self): + np.random.seed(12463) + xp_assert_equal(bsp.qspline1d(np.asarray([0])), [0.]) + # test lamda != 0 + raises(ValueError, bsp.qspline1d, np.asarray([1., 2, 3, 4, 5]), 1.) + raises(ValueError, bsp.qspline1d, np.asarray([1., 2, 3, 4, 5]), -1.) 
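+ # qspline1d supports only lamda == 0 (no smoothing); nonzero values must raise ValueError, as checked above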
+ q1d0 = np.asarray([0.85350007, 2.02441743, 2.99999534, 3.97561055, + 5.14634135]) + xp_assert_close(bsp.qspline1d(np.asarray([1., 2, 3, 4, 5])), q1d0) + + def test_cspline1d_eval(self): + np.random.seed(12464) + xp_assert_close(bsp.cspline1d_eval(np.asarray([0., 0]), [0.]), + np.asarray([0.]) + ) + xp_assert_equal(bsp.cspline1d_eval(np.asarray([1., 0, 1]), []), + np.asarray([]) + ) + x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6] + dx = x[1] - x[0] + newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., + -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6., + 6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12., + 12.5] + y = np.asarray([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879, + 1.396, 4.094]) + cj = bsp.cspline1d(y) + newy = np.asarray([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068, + 4.21600281, 6.04643068, 6.864, 5.16924703, 3.514, + 4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433, + 7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396, + 2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879, + 7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759, + 6.80717667, 6.203, 4.41570658]) + xp_assert_close(bsp.cspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy) + + def test_qspline1d_eval(self): + np.random.seed(12465) + xp_assert_close(bsp.qspline1d_eval(np.asarray([0., 0]), [0.]), + np.asarray([0.]) + ) + xp_assert_equal(bsp.qspline1d_eval(np.asarray([1., 0, 1]), []), + np.asarray([]) + ) + x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6] + dx = x[1]-x[0] + newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., + -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6., + 6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12., + 12.5] + y = np.asarray([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879, + 1.396, 4.094]) + cj = bsp.qspline1d(y) + newy = np.asarray([6.203, 4.49418159, 3.514, 5.18390821, 6.864, 5.91436915, + 4.21600002, 5.91436915, 6.864, 5.18390821, 3.514, + 4.49418159, 6.203, 6.71900226, 6.759, 7.03980488, 7.433, + 7.81016848, 7.874, 7.32718426, 5.879, 3.23872593, 1.396, + 2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879, + 7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759, + 6.71900226, 6.203, 4.49418159]) + xp_assert_close(bsp.qspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy) + + +# i/o dtypes with scipy 1.9.1, likely fixed by backwards compat +sepfir_dtype_map = {np.uint8: np.float32, int: np.float64, + np.float32: np.float32, float: float, + np.complex64: np.complex64, complex: complex} + +class TestSepfir2d: + def test_sepfir2d_invalid_filter(self): + filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0]) + image = np.random.rand(7, 9) + # No error for odd lengths + signal.sepfir2d(image, filt, filt[2:]) + + # Row or column filter must be odd + with pytest.raises(ValueError, match="odd length"): + signal.sepfir2d(image, filt, filt[1:]) + with pytest.raises(ValueError, match="odd length"): + signal.sepfir2d(image, filt[1:], filt) + + # Filters must be 1-dimensional + with pytest.raises(ValueError, match="object too deep"): + signal.sepfir2d(image, filt.reshape(1, -1), filt) + with pytest.raises(ValueError, match="object too deep"): + signal.sepfir2d(image, filt, filt.reshape(1, -1)) + + def test_sepfir2d_invalid_image(self): + filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0]) + image = np.random.rand(8, 8) + + # Image must be 2 dimensional + with pytest.raises(ValueError, match="object too deep"): + signal.sepfir2d(image.reshape(4, 4, 4), filt, filt) + + with pytest.raises(ValueError, match="object of too 
small depth"): + signal.sepfir2d(image[0], filt, filt) + + @pytest.mark.parametrize('dtyp', + [np.uint8, int, np.float32, float, np.complex64, complex] + ) + def test_simple(self, dtyp): + # test values on a paper-and-pencil example + a = np.array([[1, 2, 3, 3, 2, 1], + [1, 2, 3, 3, 2, 1], + [1, 2, 3, 3, 2, 1], + [1, 2, 3, 3, 2, 1]], dtype=dtyp) + h1 = [0.5, 1, 0.5] + h2 = [1] + result = signal.sepfir2d(a, h1, h2) + dt = sepfir_dtype_map[dtyp] + expected = np.asarray([[2.5, 4. , 5.5, 5.5, 4. , 2.5], + [2.5, 4. , 5.5, 5.5, 4. , 2.5], + [2.5, 4. , 5.5, 5.5, 4. , 2.5], + [2.5, 4. , 5.5, 5.5, 4. , 2.5]], dtype=dt) + xp_assert_close(result, expected, atol=1e-16) + + result = signal.sepfir2d(a, h2, h1) + expected = np.asarray([[2., 4., 6., 6., 4., 2.], + [2., 4., 6., 6., 4., 2.], + [2., 4., 6., 6., 4., 2.], + [2., 4., 6., 6., 4., 2.]], dtype=dt) + xp_assert_close(result, expected, atol=1e-16) + + @pytest.mark.parametrize('dtyp', + [np.uint8, int, np.float32, float, np.complex64, complex] + ) + def test_strided(self, dtyp): + a = np.array([[1, 2, 3, 3, 2, 1, 1, 2, 3], + [1, 2, 3, 3, 2, 1, 1, 2, 3], + [1, 2, 3, 3, 2, 1, 1, 2, 3], + [1, 2, 3, 3, 2, 1, 1, 2, 3]]) + h1, h2 = [0.5, 1, 0.5], [1] + result_strided = signal.sepfir2d(a[:, ::2], h1, h2) + result_contig = signal.sepfir2d(a[:, ::2].copy(), h1, h2) + xp_assert_close(result_strided, result_contig, atol=1e-15) + assert result_strided.dtype == result_contig.dtype + + @pytest.mark.xfail(reason="XXX: filt.size > image.shape: flaky") + def test_sepfir2d_strided_2(self): + # XXX: this test is flaky: fails on some reruns, with + # result[0, 1] and result[1, 1] being ~1e+224. + np.random.seed(1234) + filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0, 3.0, 2.0]) + image = np.random.rand(4, 4) + + expected = np.asarray([[36.018162, 30.239061, 38.71187 , 43.878183], + [38.180999, 35.824583, 43.525247, 43.874945], + [43.269533, 40.834018, 46.757772, 44.276423], + [49.120928, 39.681844, 43.596067, 45.085854]]) + xp_assert_close(signal.sepfir2d(image, filt, filt[::3]), expected) + + @pytest.mark.xfail(reason="XXX: flaky. pointers OOB on some platforms") + @pytest.mark.parametrize('dtyp', + [np.uint8, int, np.float32, float, np.complex64, complex] + ) + def test_sepfir2d_strided_3(self, dtyp): + # NB: 'image' and 'filt' dtypes match here. Otherwise we can run into + # unsafe casting errors for many combinations. Historically, dtype handling + # in `sepfir2d` is a tad baroque; fixing it is an enhancement. 
+ filt = np.array([1, 2, 4, 2, 1, 3, 2], dtype=dtyp) + image = np.asarray([[0, 3, 0, 1, 2], + [2, 2, 3, 3, 3], + [0, 1, 3, 0, 3], + [2, 3, 0, 1, 3], + [3, 3, 2, 1, 2]], dtype=dtyp) + + expected = [[123., 101., 91., 136., 127.], + [133., 125., 126., 152., 160.], + [136., 137., 150., 162., 177.], + [133., 124., 132., 148., 147.], + [173., 158., 152., 164., 141.]] + expected = np.asarray(expected) + result = signal.sepfir2d(image, filt, filt[::3]) + xp_assert_close(result, expected, atol=1e-15) + assert result.dtype == sepfir_dtype_map[dtyp] + + expected = [[22., 35., 41., 31., 47.], + [27., 39., 48., 47., 55.], + [33., 42., 49., 53., 59.], + [39., 44., 41., 36., 48.], + [67., 62., 47., 34., 46.]] + expected = np.asarray(expected) + result = signal.sepfir2d(image, filt[::3], filt[::3]) + xp_assert_close(result, expected, atol=1e-15) + assert result.dtype == sepfir_dtype_map[dtyp] + + +def test_cspline2d(): + np.random.seed(181819142) + image = np.random.rand(71, 73) + signal.cspline2d(image, 8.0) + + +def test_qspline2d(): + np.random.seed(181819143) + image = np.random.rand(71, 73) + signal.qspline2d(image) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_cont2discrete.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_cont2discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..0b4008afd4728acb5b63a6626440b214f2d90f6f --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_cont2discrete.py @@ -0,0 +1,417 @@ +import numpy as np +from scipy._lib._array_api import ( + assert_array_almost_equal, assert_almost_equal, xp_assert_close +) + +import pytest +from scipy.signal import cont2discrete as c2d +from scipy.signal import dlsim, ss2tf, ss2zpk, lsim, lti +from scipy.signal import tf2ss, impulse, dimpulse, step, dstep + +# Author: Jeffrey Armstrong +# March 29, 2011 + + +class TestC2D: + @pytest.mark.thread_unsafe # due to Cython fused types, see cython#6506 + def test_zoh(self): + ac = np.eye(2, dtype=np.float64) + bc = np.full((2, 1), 0.5, dtype=np.float64) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = np.full((2, 1), 0.324360635350064) + # c and d in discrete should be equal to their continuous counterparts + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cc, cd) + assert_array_almost_equal(dc, dd) + assert_almost_equal(dt_requested, dt) + + def test_foh(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + # True values are verified with Matlab + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = np.full((2, 1), 0.420839287058789) + cd_truth = cc + dd_truth = np.array([[0.260262223725224], + [0.297442541400256], + [-0.144098411624840]]) + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='foh') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_impulse(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [0.0]]) + + # True values are 
verified with Matlab + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = np.full((2, 1), 0.412180317675032) + cd_truth = cc + dd_truth = np.array([[0.4375], [0.5], [0.3125]]) + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='impulse') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_gbt(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + alpha = 1.0 / 3.0 + + ad_truth = 1.6 * np.eye(2) + bd_truth = np.full((2, 1), 0.3) + cd_truth = np.array([[0.9, 1.2], + [1.2, 1.2], + [1.2, 0.3]]) + dd_truth = np.array([[0.175], + [0.2], + [-0.205]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='gbt', alpha=alpha) + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + + def test_euler(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = 1.5 * np.eye(2) + bd_truth = np.full((2, 1), 0.25) + cd_truth = np.array([[0.75, 1.0], + [1.0, 1.0], + [1.0, 0.25]]) + dd_truth = dc + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='euler') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_backward_diff(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = 2.0 * np.eye(2) + bd_truth = np.full((2, 1), 0.5) + cd_truth = np.array([[1.5, 2.0], + [2.0, 2.0], + [2.0, 0.5]]) + dd_truth = np.array([[0.875], + [1.0], + [0.295]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='backward_diff') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + + def test_bilinear(self): + ac = np.eye(2) + bc = np.full((2, 1), 0.5) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = (5.0 / 3.0) * np.eye(2) + bd_truth = np.full((2, 1), 1.0 / 3.0) + cd_truth = np.array([[1.0, 4.0 / 3.0], + [4.0 / 3.0, 4.0 / 3.0], + [4.0 / 3.0, 1.0 / 3.0]]) + dd_truth = np.array([[0.291666666666667], + [1.0 / 3.0], + [-0.121666666666667]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='bilinear') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + # Same continuous system again, but change sampling rate + + ad_truth = 1.4 * np.eye(2) + bd_truth = np.full((2, 1), 0.2) + cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]]) + dd_truth = np.array([[0.175], [0.2], [-0.205]]) + + dt_requested = 1.0 / 3.0 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='bilinear') + + assert_array_almost_equal(ad_truth, ad) + 
assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_transferfunction(self): + numc = np.array([0.25, 0.25, 0.5]) + denc = np.array([0.75, 0.75, 1.0]) + + numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]]) + dend = np.array([1.0, -1.351394049721225, 0.606530659712634]) + + dt_requested = 0.5 + + num, den, dt = c2d((numc, denc), dt_requested, method='zoh') + + assert_array_almost_equal(numd, num) + assert_array_almost_equal(dend, den) + assert_almost_equal(dt_requested, dt) + + def test_zerospolesgain(self): + zeros_c = np.array([0.5, -0.5]) + poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) + k_c = 1.0 + + zeros_d = [1.23371727305860, 0.735356894461267] + polls_d = [0.938148335039729 + 0.346233593780536j, + 0.938148335039729 - 0.346233593780536j] + k_d = 1.0 + + dt_requested = 0.5 + + zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested, + method='zoh') + + assert_array_almost_equal(zeros_d, zeros) + assert_array_almost_equal(polls_d, poles) + assert_almost_equal(k_d, k) + assert_almost_equal(dt_requested, dt) + + def test_gbt_with_sio_tf_and_zpk(self): + """Test method='gbt' with alpha=0.25 for tf and zpk cases.""" + # State space coefficients for the continuous SIO system. + A = -1.0 + B = 1.0 + C = 1.0 + D = 0.5 + + # The continuous transfer function coefficients. + cnum, cden = ss2tf(A, B, C, D) + + # Continuous zpk representation + cz, cp, ck = ss2zpk(A, B, C, D) + + h = 1.0 + alpha = 0.25 + + # Explicit formulas, in the scalar case. + Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A) + Bd = h * B / (1 - alpha * h * A) + Cd = C / (1 - alpha * h * A) + Dd = D + alpha * C * Bd + + # Convert the explicit solution to tf + dnum, dden = ss2tf(Ad, Bd, Cd, Dd) + + # Compute the discrete tf using cont2discrete. + c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha) + + xp_assert_close(dnum, c2dnum) + xp_assert_close(dden, c2dden) + + # Convert explicit solution to zpk. + dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd) + + # Compute the discrete zpk using cont2discrete. + c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha) + + xp_assert_close(dz, c2dz) + xp_assert_close(dp, c2dp) + xp_assert_close(dk, c2dk) + + def test_discrete_approx(self): + """ + Test that the solution to the discrete approximation of a continuous + system actually approximates the solution to the continuous system. + This is an indirect test of the correctness of the implementation + of cont2discrete. + """ + + def u(t): + return np.sin(2.5 * t) + + a = np.array([[-0.01]]) + b = np.array([[1.0]]) + c = np.array([[1.0]]) + d = np.array([[0.2]]) + x0 = 1.0 + + t = np.linspace(0, 10.0, 101) + dt = t[1] - t[0] + u1 = u(t) + + # Use lsim to compute the solution to the continuous system. + t, yout, xout = lsim((a, b, c, d), T=t, U=u1, X0=x0) + + # Convert the continuous system to a discrete approximation. + dsys = c2d((a, b, c, d), dt, method='bilinear') + + # Use dlsim with the pairwise averaged input to compute the output + # of the discrete system. + u2 = 0.5 * (u1[:-1] + u1[1:]) + t2 = t[:-1] + td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0) + + # ymid is the average of consecutive terms of the "exact" output + # computed by lsim2. This is what the discrete approximation + # actually approximates. 
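+ # (the reference output above is computed with lsim; lsim2 was the older ODE-based variant of the same computation)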
+ ymid = 0.5 * (yout[:-1] + yout[1:]) + + xp_assert_close(yd2.ravel(), ymid, rtol=1e-4) + + def test_simo_tf(self): + # See gh-5753 + tf = ([[1, 0], [1, 1]], [1, 1]) + num, den, dt = c2d(tf, 0.01) + + assert dt == 0.01 # sanity check + xp_assert_close(den, [1, -0.990404983], rtol=1e-3) + xp_assert_close(num, [[1, -1], [1, -0.99004983]], rtol=1e-3) + + def test_multioutput(self): + ts = 0.01 # time step + + tf = ([[1, -3], [1, 5]], [1, 1]) + num, den, dt = c2d(tf, ts) + + tf1 = (tf[0][0], tf[1]) + num1, den1, dt1 = c2d(tf1, ts) + + tf2 = (tf[0][1], tf[1]) + num2, den2, dt2 = c2d(tf2, ts) + + # Sanity checks + assert dt == dt1 + assert dt == dt2 + + # Check that we get the same results + xp_assert_close(num, np.vstack((num1, num2)), rtol=1e-13) + + # Single input, so the denominator should + # not be multidimensional like the numerator + xp_assert_close(den, den1, rtol=1e-13) + xp_assert_close(den, den2, rtol=1e-13) + +class TestC2dLti: + def test_c2d_ss(self): + # StateSpace + A = np.array([[-0.3, 0.1], [0.2, -0.7]]) + B = np.array([[0], [1]]) + C = np.array([[1, 0]]) + D = 0 + + A_res = np.array([[0.985136404135682, 0.004876671474795], + [0.009753342949590, 0.965629718236502]]) + B_res = np.array([[0.000122937599964], [0.049135527547844]]) + + sys_ssc = lti(A, B, C, D) + sys_ssd = sys_ssc.to_discrete(0.05) + + xp_assert_close(sys_ssd.A, A_res) + xp_assert_close(sys_ssd.B, B_res) + xp_assert_close(sys_ssd.C, C) + xp_assert_close(sys_ssd.D, np.zeros_like(sys_ssd.D)) + + def test_c2d_tf(self): + + sys = lti([0.5, 0.3], [1.0, 0.4]) + sys = sys.to_discrete(0.005) + + # Matlab results + num_res = np.array([0.5, -0.485149004980066]) + den_res = np.array([1.0, -0.980198673306755]) + + # Somehow a lot of numerical errors + xp_assert_close(sys.den, den_res, atol=0.02) + xp_assert_close(sys.num, num_res, atol=0.02) + + +class TestC2dInvariants: + # Some test cases for checking the invariances. 
+ # Array of triplets: (system, sample time, number of samples) + cases = [ + (tf2ss([1, 1], [1, 1.5, 1]), 0.25, 10), + (tf2ss([1, 2], [1, 1.5, 3, 1]), 0.5, 10), + (tf2ss(0.1, [1, 1, 2, 1]), 0.5, 10), + ] + + # Check that systems discretized with the impulse-invariant + # method really hold the invariant + @pytest.mark.parametrize("sys,sample_time,samples_number", cases) + def test_impulse_invariant(self, sys, sample_time, samples_number): + time = np.arange(samples_number) * sample_time + _, yout_cont = impulse(sys, T=time) + _, yout_disc = dimpulse(c2d(sys, sample_time, method='impulse'), + n=len(time)) + xp_assert_close(sample_time * yout_cont.ravel(), yout_disc[0].ravel()) + + # Step invariant should hold for ZOH discretized systems + @pytest.mark.parametrize("sys,sample_time,samples_number", cases) + def test_step_invariant(self, sys, sample_time, samples_number): + time = np.arange(samples_number) * sample_time + _, yout_cont = step(sys, T=time) + _, yout_disc = dstep(c2d(sys, sample_time, method='zoh'), n=len(time)) + xp_assert_close(yout_cont.ravel(), yout_disc[0].ravel()) + + # Linear invariant should hold for FOH discretized systems + @pytest.mark.parametrize("sys,sample_time,samples_number", cases) + def test_linear_invariant(self, sys, sample_time, samples_number): + time = np.arange(samples_number) * sample_time + _, yout_cont, _ = lsim(sys, T=time, U=time) + _, yout_disc, _ = dlsim(c2d(sys, sample_time, method='foh'), u=time) + xp_assert_close(yout_cont.ravel(), yout_disc.ravel()) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_czt.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_czt.py new file mode 100644 index 0000000000000000000000000000000000000000..35087d99fec5057131f0735d43e0faa33a74ef82 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_czt.py @@ -0,0 +1,221 @@ +# This program is public domain +# Authors: Paul Kienzle, Nadav Horesh +''' +A unit test module for czt.py +''' +import pytest +from scipy._lib._array_api import xp_assert_close +from scipy.fft import fft +from scipy.signal import (czt, zoom_fft, czt_points, CZT, ZoomFFT) +import numpy as np + + +def check_czt(x): + # Check that czt is the equivalent of normal fft + y = fft(x) + y1 = czt(x) + xp_assert_close(y1, y, rtol=1e-13) + + # Check that interpolated czt is the equivalent of normal fft + y = fft(x, 100*len(x)) + y1 = czt(x, 100*len(x)) + xp_assert_close(y1, y, rtol=1e-12) + + +def check_zoom_fft(x): + # Check that zoom_fft is the equivalent of normal fft + y = fft(x) + y1 = zoom_fft(x, [0, 2-2./len(y)], endpoint=True) + xp_assert_close(y1, y, rtol=1e-11, atol=1e-14) + y1 = zoom_fft(x, [0, 2]) + xp_assert_close(y1, y, rtol=1e-11, atol=1e-14) + + # Test fn scalar + y1 = zoom_fft(x, 2-2./len(y), endpoint=True) + xp_assert_close(y1, y, rtol=1e-11, atol=1e-14) + y1 = zoom_fft(x, 2) + xp_assert_close(y1, y, rtol=1e-11, atol=1e-14) + + # Check that zoom_fft with oversampling is equivalent to zero padding + over = 10 + yover = fft(x, over*len(x)) + y2 = zoom_fft(x, [0, 2-2./len(yover)], m=len(yover), endpoint=True) + xp_assert_close(y2, yover, rtol=1e-12, atol=1e-10) + y2 = zoom_fft(x, [0, 2], m=len(yover)) + xp_assert_close(y2, yover, rtol=1e-12, atol=1e-10) + + # Check that zoom_fft works on a subrange + w = np.linspace(0, 2-2./len(x), len(x)) + f1, f2 = w[3], w[6] + y3 = zoom_fft(x, [f1, f2], m=3*over+1, endpoint=True) + idx3 = slice(3*over, 6*over+1) + xp_assert_close(y3, yover[idx3], rtol=1e-13) + + +def test_1D(): + # 
Test of 1D version of the transforms + + rng = np.random.RandomState(0) # Deterministic randomness + + # Random signals + lengths = rng.randint(8, 200, 20) + np.append(lengths, 1) + for length in lengths: + x = rng.random(length) + check_zoom_fft(x) + check_czt(x) + + # Gauss + t = np.linspace(-2, 2, 128) + x = np.exp(-t**2/0.01) + check_zoom_fft(x) + + # Linear + x = [1, 2, 3, 4, 5, 6, 7] + check_zoom_fft(x) + + # Check near powers of two + check_zoom_fft(range(126-31)) + check_zoom_fft(range(127-31)) + check_zoom_fft(range(128-31)) + check_zoom_fft(range(129-31)) + check_zoom_fft(range(130-31)) + + # Check transform on n-D array input + x = np.reshape(np.arange(3*2*28), (3, 2, 28)) + y1 = zoom_fft(x, [0, 2-2./28]) + y2 = zoom_fft(x[2, 0, :], [0, 2-2./28]) + xp_assert_close(y1[2, 0], y2, rtol=1e-13, atol=1e-12) + + y1 = zoom_fft(x, [0, 2], endpoint=False) + y2 = zoom_fft(x[2, 0, :], [0, 2], endpoint=False) + xp_assert_close(y1[2, 0], y2, rtol=1e-13, atol=1e-12) + + # Random (not a test condition) + x = rng.rand(101) + check_zoom_fft(x) + + # Spikes + t = np.linspace(0, 1, 128) + x = np.sin(2*np.pi*t*5)+np.sin(2*np.pi*t*13) + check_zoom_fft(x) + + # Sines + x = np.zeros(100, dtype=complex) + x[[1, 5, 21]] = 1 + check_zoom_fft(x) + + # Sines plus complex component + x += 1j*np.linspace(0, 0.5, x.shape[0]) + check_zoom_fft(x) + + +def test_large_prime_lengths(): + rng = np.random.RandomState(0) # Deterministic randomness + for N in (101, 1009, 10007): + x = rng.rand(N) + y = fft(x) + y1 = czt(x) + xp_assert_close(y, y1, rtol=1e-12) + + +@pytest.mark.slow +def test_czt_vs_fft(): + rng = np.random.RandomState(123) # Deterministic randomness + random_lengths = rng.exponential(100000, size=10).astype('int') + for n in random_lengths: + a = rng.randn(n) + xp_assert_close(czt(a), fft(a), rtol=1e-11) + + +def test_empty_input(): + with pytest.raises(ValueError, match='Invalid number of CZT'): + czt([]) + with pytest.raises(ValueError, match='Invalid number of CZT'): + zoom_fft([], 0.5) + + +def test_0_rank_input(): + with pytest.raises(IndexError, match='tuple index out of range'): + czt(5) + with pytest.raises(IndexError, match='tuple index out of range'): + zoom_fft(5, 0.5) + + +@pytest.mark.parametrize('impulse', ([0, 0, 1], [0, 0, 1, 0, 0], + np.concatenate((np.array([0, 0, 1]), + np.zeros(100))))) +@pytest.mark.parametrize('m', (1, 3, 5, 8, 101, 1021)) +@pytest.mark.parametrize('a', (1, 2, 0.5, 1.1)) +# Step that tests away from the unit circle, but not so far it explodes from +# numerical error +@pytest.mark.parametrize('w', (None, 0.98534 + 0.17055j)) +def test_czt_math(impulse, m, w, a): + # z-transform of an impulse is 1 everywhere + xp_assert_close(czt(impulse[2:], m=m, w=w, a=a), + np.ones(m, dtype=np.complex128), rtol=1e-10) + + # z-transform of a delayed impulse is z**-1 + xp_assert_close(czt(impulse[1:], m=m, w=w, a=a), + czt_points(m=m, w=w, a=a)**-1, rtol=1e-10) + + # z-transform of a 2-delayed impulse is z**-2 + xp_assert_close(czt(impulse, m=m, w=w, a=a), + czt_points(m=m, w=w, a=a)**-2, rtol=1e-10) + + +def test_int_args(): + # Integer argument `a` was producing all 0s + xp_assert_close(abs(czt([0, 1], m=10, a=2)), 0.5*np.ones(10), rtol=1e-15) + xp_assert_close(czt_points(11, w=2), + 1/(2**np.arange(11, dtype=np.complex128)), rtol=1e-30) + + +def test_czt_points(): + for N in (1, 2, 3, 8, 11, 100, 101, 10007): + xp_assert_close(czt_points(N), np.exp(2j*np.pi*np.arange(N)/N), + rtol=1e-30) + + xp_assert_close(czt_points(7, w=1), np.ones(7, dtype=np.complex128), rtol=1e-30) + 
xp_assert_close(czt_points(11, w=2.), + 1/(2**np.arange(11, dtype=np.complex128)), rtol=1e-30) + + func = CZT(12, m=11, w=2., a=1) + xp_assert_close(func.points(), 1/(2**np.arange(11)), rtol=1e-30) + + +@pytest.mark.parametrize('cls, args', [(CZT, (100,)), (ZoomFFT, (100, 0.2))]) +def test_CZT_size_mismatch(cls, args): + # Data size doesn't match function's expected size + myfunc = cls(*args) + with pytest.raises(ValueError, match='CZT defined for'): + myfunc(np.arange(5)) + + +def test_invalid_range(): + with pytest.raises(ValueError, match='2-length sequence'): + ZoomFFT(100, [1, 2, 3]) + + +@pytest.mark.parametrize('m', [0, -11, 5.5, 4.0]) +def test_czt_points_errors(m): + # Invalid number of points + with pytest.raises(ValueError, match='Invalid number of CZT'): + czt_points(m) + + +@pytest.mark.parametrize('size', [0, -5, 3.5, 4.0]) +def test_nonsense_size(size): + # Numpy and Scipy fft() give ValueError for 0 output size, so we do, too + with pytest.raises(ValueError, match='Invalid number of CZT'): + CZT(size, 3) + with pytest.raises(ValueError, match='Invalid number of CZT'): + ZoomFFT(size, 0.2, 3) + with pytest.raises(ValueError, match='Invalid number of CZT'): + CZT(3, size) + with pytest.raises(ValueError, match='Invalid number of CZT'): + ZoomFFT(3, 0.2, size) + with pytest.raises(ValueError, match='Invalid number of CZT'): + czt([1, 2, 3], size) + with pytest.raises(ValueError, match='Invalid number of CZT'): + zoom_fft([1, 2, 3], 0.2, size) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_dltisys.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_dltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..872541543ba485f3e8a17735bee471875f92fd05 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_dltisys.py @@ -0,0 +1,599 @@ +# Author: Jeffrey Armstrong +# April 4, 2011 + +import numpy as np +from numpy.testing import suppress_warnings +from pytest import raises as assert_raises +from scipy._lib._array_api import ( + assert_array_almost_equal, assert_almost_equal, xp_assert_close, xp_assert_equal, +) + +from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti, + StateSpace, TransferFunction, ZerosPolesGain, + dfreqresp, dbode, BadCoefficients) + + +class TestDLTI: + + def test_dlsim(self): + + a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) + b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) + c = np.asarray([[0.1, 0.3]]) + d = np.asarray([[0.0, -0.1, 0.0]]) + dt = 0.5 + + # Create an input matrix with inputs down the columns (3 cols) and its + # respective time input vector + u = np.hstack((np.linspace(0, 4.0, num=5)[:, np.newaxis], + np.full((5, 1), 0.01), + np.full((5, 1), -0.002))) + t_in = np.linspace(0, 2.0, num=5) + + # Define the known result + yout_truth = np.array([[-0.001, + -0.00073, + 0.039446, + 0.0915387, + 0.13195948]]).T + xout_truth = np.asarray([[0, 0], + [0.0012, 0.0005], + [0.40233, 0.00071], + [1.163368, -0.079327], + [2.2402985, -0.3035679]]) + + tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in) + + assert_array_almost_equal(yout_truth, yout) + assert_array_almost_equal(xout_truth, xout) + assert_array_almost_equal(t_in, tout) + + # Make sure input with single-dimension doesn't raise error + dlsim((1, 2, 3), 4) + + # Interpolated control - inputs should have different time steps + # than the discrete model uses internally + u_sparse = u[[0, 4], :] + t_sparse = np.asarray([0.0, 2.0]) + + tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, 
t_sparse) + + assert_array_almost_equal(yout_truth, yout) + assert_array_almost_equal(xout_truth, xout) + assert len(tout) == len(yout) + + # Transfer functions (assume dt = 0.5) + num = np.asarray([1.0, -0.1]) + den = np.asarray([0.3, 1.0, 0.2]) + yout_truth = np.array([[0.0, + 0.0, + 3.33333333333333, + -4.77777777777778, + 23.0370370370370]]).T + + # Assume use of the first column of the control input built earlier + tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in) + + assert_array_almost_equal(yout, yout_truth) + assert_array_almost_equal(t_in, tout) + + # Retest the same with a 1-D input vector + uflat = np.asarray(u[:, 0]) + uflat = uflat.reshape((5,)) + tout, yout = dlsim((num, den, 0.5), uflat, t_in) + + assert_array_almost_equal(yout, yout_truth) + assert_array_almost_equal(t_in, tout) + + # zeros-poles-gain representation + zd = np.array([0.5, -0.5]) + pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) + k = 1.0 + yout_truth = np.array([[0.0, 1.0, 2.0, 2.25, 2.5]]).T + + tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in) + + assert_array_almost_equal(yout, yout_truth) + assert_array_almost_equal(t_in, tout) + + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dlsim, system, u) + + def test_dstep(self): + + a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) + b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) + c = np.asarray([[0.1, 0.3]]) + d = np.asarray([[0.0, -0.1, 0.0]]) + dt = 0.5 + + # Because b.shape[1] == 3, dstep should result in a tuple of three + # result vectors + yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956, + -0.036324, -0.093318, -0.15782348, + -0.226628324, -0.2969374948]), + np.asarray([-0.1, -0.075, -0.058, -0.04815, + -0.04453, -0.0461895, -0.0521812, + -0.061588875, -0.073549579, + -0.08727047595]), + np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239, + 0.009081, 0.0233295, 0.03945587, + 0.056657081, 0.0742343737])) + + tout, yout = dstep((a, b, c, d, dt), n=10) + + assert len(yout) == 3 + + for i in range(0, len(yout)): + assert yout[i].shape[0] == 10 + assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i]) + + # Check that the other two inputs (tf, zpk) will work as well + tfin = ([1.0], [1.0, 1.0], 0.5) + yout_tfstep = np.asarray([0.0, 1.0, 0.0]) + tout, yout = dstep(tfin, n=3) + assert len(yout) == 1 + assert_array_almost_equal(yout[0].flatten(), yout_tfstep) + + zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,) + tout, yout = dstep(zpkin, n=3) + assert len(yout) == 1 + assert_array_almost_equal(yout[0].flatten(), yout_tfstep) + + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dstep, system) + + def test_dimpulse(self): + + a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) + b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) + c = np.asarray([[0.1, 0.3]]) + d = np.asarray([[0.0, -0.1, 0.0]]) + dt = 0.5 + + # Because b.shape[1] == 3, dimpulse should result in a tuple of three + # result vectors + yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084, + -0.045884, -0.056994, -0.06450548, + -0.068804844, -0.0703091708]), + np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362, + -0.0016595, -0.0059917, -0.009407675, + -0.011960704, -0.01372089695]), + np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771, + 0.011471, 0.0142485, 0.01612637, + 0.017201211, 0.0175772927])) + + tout, yout = dimpulse((a, b, c, d, dt), n=10) + + assert len(yout) == 3 + + for i in range(0, len(yout)): + assert yout[i].shape[0] == 10 + 
assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i]) + + # Check that the other two inputs (tf, zpk) will work as well + tfin = ([1.0], [1.0, 1.0], 0.5) + yout_tfimpulse = np.asarray([0.0, 1.0, -1.0]) + tout, yout = dimpulse(tfin, n=3) + assert len(yout) == 1 + assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse) + + zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,) + tout, yout = dimpulse(zpkin, n=3) + assert len(yout) == 1 + assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse) + + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dimpulse, system) + + def test_dlsim_trivial(self): + a = np.array([[0.0]]) + b = np.array([[0.0]]) + c = np.array([[0.0]]) + d = np.array([[0.0]]) + n = 5 + u = np.zeros(n).reshape(-1, 1) + tout, yout, xout = dlsim((a, b, c, d, 1), u) + xp_assert_equal(tout, np.arange(float(n))) + xp_assert_equal(yout, np.zeros((n, 1))) + xp_assert_equal(xout, np.zeros((n, 1))) + + def test_dlsim_simple1d(self): + a = np.array([[0.5]]) + b = np.array([[0.0]]) + c = np.array([[1.0]]) + d = np.array([[0.0]]) + n = 5 + u = np.zeros(n).reshape(-1, 1) + tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1) + xp_assert_equal(tout, np.arange(float(n))) + expected = (0.5 ** np.arange(float(n))).reshape(-1, 1) + xp_assert_equal(yout, expected) + xp_assert_equal(xout, expected) + + def test_dlsim_simple2d(self): + lambda1 = 0.5 + lambda2 = 0.25 + a = np.array([[lambda1, 0.0], + [0.0, lambda2]]) + b = np.array([[0.0], + [0.0]]) + c = np.array([[1.0, 0.0], + [0.0, 1.0]]) + d = np.array([[0.0], + [0.0]]) + n = 5 + u = np.zeros(n).reshape(-1, 1) + tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1) + xp_assert_equal(tout, np.arange(float(n))) + # The analytical solution: + expected = (np.array([lambda1, lambda2]) ** + np.arange(float(n)).reshape(-1, 1)) + xp_assert_equal(yout, expected) + xp_assert_equal(xout, expected) + + def test_more_step_and_impulse(self): + lambda1 = 0.5 + lambda2 = 0.75 + a = np.array([[lambda1, 0.0], + [0.0, lambda2]]) + b = np.array([[1.0, 0.0], + [0.0, 1.0]]) + c = np.array([[1.0, 1.0]]) + d = np.array([[0.0, 0.0]]) + + n = 10 + + # Check a step response. + ts, ys = dstep((a, b, c, d, 1), n=n) + + # Create the exact step response. + stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n)) + stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n)) + + xp_assert_close(ys[0][:, 0], stp0) + xp_assert_close(ys[1][:, 0], stp1) + + # Check an impulse response with an initial condition. + x0 = np.array([1.0, 1.0]) + ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0) + + # Create the exact impulse response. + imp = (np.array([lambda1, lambda2]) ** + np.arange(-1, n + 1).reshape(-1, 1)) + imp[0, :] = 0.0 + # Analytical solution to impulse response + y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0) + y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0) + + xp_assert_close(yi[0][:, 0], y0) + xp_assert_close(yi[1][:, 0], y1) + + # Check that dt=0.1, n=3 gives 3 time values. + system = ([1.0], [1.0, -0.5], 0.1) + t, (y,) = dstep(system, n=3) + xp_assert_close(t, [0, 0.1, 0.2]) + xp_assert_equal(y.T, [[0, 1.0, 1.5]]) + t, (y,) = dimpulse(system, n=3) + xp_assert_close(t, [0, 0.1, 0.2]) + xp_assert_equal(y.T, [[0, 1, 0.5]]) + + +class TestDlti: + def test_dlti_instantiation(self): + # Test that lti can be instantiated. 
+ + dt = 0.05 + # TransferFunction + s = dlti([1], [-1], dt=dt) + assert isinstance(s, TransferFunction) + assert isinstance(s, dlti) + assert not isinstance(s, lti) + assert s.dt == dt + + # ZerosPolesGain + s = dlti(np.array([]), np.array([-1]), 1, dt=dt) + assert isinstance(s, ZerosPolesGain) + assert isinstance(s, dlti) + assert not isinstance(s, lti) + assert s.dt == dt + + # StateSpace + s = dlti([1], [-1], 1, 3, dt=dt) + assert isinstance(s, StateSpace) + assert isinstance(s, dlti) + assert not isinstance(s, lti) + assert s.dt == dt + + # Number of inputs + assert_raises(ValueError, dlti, 1) + assert_raises(ValueError, dlti, 1, 1, 1, 1, 1) + + +class TestStateSpaceDisc: + def test_initialization(self): + # Check that all initializations work + dt = 0.05 + StateSpace(1, 1, 1, 1, dt=dt) + StateSpace([1], [2], [3], [4], dt=dt) + StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]), + np.array([[1, 0]]), np.array([[0]]), dt=dt) + StateSpace(1, 1, 1, 1, dt=True) + + def test_conversion(self): + # Check the conversion functions + s = StateSpace(1, 2, 3, 4, dt=0.05) + assert isinstance(s.to_ss(), StateSpace) + assert isinstance(s.to_tf(), TransferFunction) + assert isinstance(s.to_zpk(), ZerosPolesGain) + + # Make sure copies work + assert StateSpace(s) is not s + assert s.to_ss() is not s + + def test_properties(self): + # Test setters/getters for cross class properties. + # This implicitly tests to_tf() and to_zpk() + + # Getters + s = StateSpace(1, 1, 1, 1, dt=0.05) + xp_assert_equal(s.poles, [1.]) + xp_assert_equal(s.zeros, [0.]) + + +class TestTransferFunction: + def test_initialization(self): + # Check that all initializations work + dt = 0.05 + TransferFunction(1, 1, dt=dt) + TransferFunction([1], [2], dt=dt) + TransferFunction(np.array([1]), np.array([2]), dt=dt) + TransferFunction(1, 1, dt=True) + + def test_conversion(self): + # Check the conversion functions + s = TransferFunction([1, 0], [1, -1], dt=0.05) + assert isinstance(s.to_ss(), StateSpace) + assert isinstance(s.to_tf(), TransferFunction) + assert isinstance(s.to_zpk(), ZerosPolesGain) + + # Make sure copies work + assert TransferFunction(s) is not s + assert s.to_tf() is not s + + def test_properties(self): + # Test setters/getters for cross class properties. + # This implicitly tests to_ss() and to_zpk() + + # Getters + s = TransferFunction([1, 0], [1, -1], dt=0.05) + xp_assert_equal(s.poles, [1.]) + xp_assert_equal(s.zeros, [0.]) + + +class TestZerosPolesGain: + def test_initialization(self): + # Check that all initializations work + dt = 0.05 + ZerosPolesGain(1, 1, 1, dt=dt) + ZerosPolesGain([1], [2], 1, dt=dt) + ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt) + ZerosPolesGain(1, 1, 1, dt=True) + + def test_conversion(self): + # Check the conversion functions + s = ZerosPolesGain(1, 2, 3, dt=0.05) + assert isinstance(s.to_ss(), StateSpace) + assert isinstance(s.to_tf(), TransferFunction) + assert isinstance(s.to_zpk(), ZerosPolesGain) + + # Make sure copies work + assert ZerosPolesGain(s) is not s + assert s.to_zpk() is not s + + +class Test_dfreqresp: + + def test_manual(self): + # Test dfreqresp() real part calculation (manual sanity check). 
+ # 1st order low-pass filter: H(z) = 1 / (z - 0.2), + system = TransferFunction(1, [1, -0.2], dt=0.1) + w = [0.1, 1, 10] + w, H = dfreqresp(system, w=w) + + # test real + expected_re = [1.2383, 0.4130, -0.7553] + assert_almost_equal(H.real, expected_re, decimal=4) + + # test imag + expected_im = [-0.1555, -1.0214, 0.3955] + assert_almost_equal(H.imag, expected_im, decimal=4) + + def test_auto(self): + # Test dfreqresp() real part calculation. + # 1st order low-pass filter: H(z) = 1 / (z - 0.2), + system = TransferFunction(1, [1, -0.2], dt=0.1) + w = [0.1, 1, 10, 100] + w, H = dfreqresp(system, w=w) + jw = np.exp(w * 1j) + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + + # test real + expected_re = y.real + assert_almost_equal(H.real, expected_re) + + # test imag + expected_im = y.imag + assert_almost_equal(H.imag, expected_im) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(z) = 1 / (z - 0.2), + # Expected range is from 0.01 to 10. + system = TransferFunction(1, [1, -0.2], dt=0.1) + n = 10 + expected_w = np.linspace(0, np.pi, 10, endpoint=False) + w, H = dfreqresp(system, n=n) + assert_almost_equal(w, expected_w) + + def test_pole_one(self): + # Test that freqresp() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = TransferFunction([1], [1, -1], dt=0.1) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, message="divide by zero") + sup.filter(RuntimeWarning, message="invalid value encountered") + w, H = dfreqresp(system, n=2) + assert w[0] == 0. # a fail would give not-a-number + + def test_error(self): + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dfreqresp, system) + + def test_from_state_space(self): + # H(z) = 2 / z^3 - 0.5 * z^2 + + system_TF = dlti([2], [1, -0.5, 0, 0]) + + A = np.array([[0.5, 0, 0], + [1, 0, 0], + [0, 1, 0]]) + B = np.array([[1, 0, 0]]).T + C = np.array([[0, 0, 2]]) + D = 0 + + system_SS = dlti(A, B, C, D) + w = 10.0**np.arange(-3,0,.5) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + w1, H1 = dfreqresp(system_TF, w=w) + w2, H2 = dfreqresp(system_SS, w=w) + + assert_almost_equal(H1, H2) + + def test_from_zpk(self): + # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), + system_ZPK = dlti([],[0.2],0.3) + system_TF = dlti(0.3, [1, -0.2]) + w = [0.1, 1, 10, 100] + w1, H1 = dfreqresp(system_ZPK, w=w) + w2, H2 = dfreqresp(system_TF, w=w) + assert_almost_equal(H1, H2) + + +class Test_bode: + + def test_manual(self): + # Test bode() magnitude calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), + dt = 0.1 + system = TransferFunction(0.3, [1, -0.2], dt=dt) + w = [0.1, 0.5, 1, np.pi] + w2, mag, phase = dbode(system, w=w) + + # Test mag + expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412] + assert_almost_equal(mag, expected_mag, decimal=4) + + # Test phase + expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000] + assert_almost_equal(phase, expected_phase, decimal=4) + + # Test frequency + xp_assert_equal(np.array(w) / dt, w2) + + def test_auto(self): + # Test bode() magnitude calculation. 
+ # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), + system = TransferFunction(0.3, [1, -0.2], dt=0.1) + w = np.array([0.1, 0.5, 1, np.pi]) + w2, mag, phase = dbode(system, w=w) + jw = np.exp(w * 1j) + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + + # Test mag + expected_mag = 20.0 * np.log10(abs(y)) + assert_almost_equal(mag, expected_mag) + + # Test phase + expected_phase = np.rad2deg(np.angle(y)) + assert_almost_equal(phase, expected_phase) + + def test_range(self): + # Test that bode() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), + dt = 0.1 + system = TransferFunction(0.3, [1, -0.2], dt=0.1) + n = 10 + # Expected range is from 0.01 to 10. + expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt + w, mag, phase = dbode(system, n=n) + assert_almost_equal(w, expected_w) + + def test_pole_one(self): + # Test that freqresp() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = TransferFunction([1], [1, -1], dt=0.1) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, message="divide by zero") + sup.filter(RuntimeWarning, message="invalid value encountered") + w, mag, phase = dbode(system, n=2) + assert w[0] == 0. # a fail would give not-a-number + + def test_imaginary(self): + # bode() should not fail on a system with pure imaginary poles. + # The test passes if bode doesn't raise an exception. + system = TransferFunction([1], [1, 0, 100], dt=0.1) + dbode(system, n=2) + + def test_error(self): + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dbode, system) + + +class TestTransferFunctionZConversion: + """Test private conversions between 'z' and 'z**-1' polynomials.""" + + def test_full(self): + # Numerator and denominator same order + num = np.asarray([2.0, 3, 4]) + den = np.asarray([5.0, 6, 7]) + num2, den2 = TransferFunction._z_to_zinv(num, den) + xp_assert_equal(num, num2) + xp_assert_equal(den, den2) + + num2, den2 = TransferFunction._zinv_to_z(num, den) + xp_assert_equal(num, num2) + xp_assert_equal(den, den2) + + def test_numerator(self): + # Numerator lower order than denominator + num = np.asarray([2.0, 3]) + den = np.asarray([50, 6, 7]) + num2, den2 = TransferFunction._z_to_zinv(num, den) + xp_assert_equal([0.0, 2, 3], num2) + xp_assert_equal(den, den2) + + num2, den2 = TransferFunction._zinv_to_z(num, den) + xp_assert_equal([2.0, 3, 0], num2) + xp_assert_equal(den, den2) + + def test_denominator(self): + # Numerator higher order than denominator + num = np.asarray([2., 3, 4]) + den = np.asarray([5.0, 6]) + num2, den2 = TransferFunction._z_to_zinv(num, den) + xp_assert_equal(num, num2) + xp_assert_equal([0.0, 5, 6], den2) + + num2, den2 = TransferFunction._zinv_to_z(num, den) + xp_assert_equal(num, num2) + xp_assert_equal([5.0, 6, 0], den2) + diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_filter_design.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..62613b5bb64ec2980dc8a306e7a2a997b5713d2c --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_filter_design.py @@ -0,0 +1,4485 @@ +import warnings + +from scipy._lib import _pep440 +import numpy as np +from numpy.testing import ( + assert_array_almost_equal_nulp, assert_warns, suppress_warnings +) +import pytest +from pytest import raises as assert_raises +from scipy._lib._array_api 
import ( + xp_assert_close, xp_assert_equal, + assert_array_almost_equal, +) + +from numpy import array, spacing, sin, pi, sort, sqrt +from scipy.signal import (argrelextrema, BadCoefficients, bessel, besselap, bilinear, + buttap, butter, buttord, cheb1ap, cheb1ord, cheb2ap, + cheb2ord, cheby1, cheby2, ellip, ellipap, ellipord, + firwin, freqs_zpk, freqs, freqz, freqz_zpk, + gammatone, group_delay, iircomb, iirdesign, iirfilter, + iirnotch, iirpeak, lp2bp, lp2bs, lp2hp, lp2lp, normalize, + medfilt, order_filter, + sos2tf, sos2zpk, sosfreqz, freqz_sos, tf2sos, tf2zpk, zpk2sos, + zpk2tf, bilinear_zpk, lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, + lp2bs_zpk) +from scipy.signal._filter_design import (_cplxreal, _cplxpair, _norm_factor, + _bessel_poly, _bessel_zeros) + +try: + import mpmath +except ImportError: + mpmath = None + + +def mpmath_check(min_ver): + return pytest.mark.skipif( + mpmath is None + or _pep440.parse(mpmath.__version__) < _pep440.Version(min_ver), + reason=f"mpmath version >= {min_ver} required", + ) + + +class TestCplxPair: + + def test_trivial_input(self): + assert _cplxpair([]).size == 0 + assert _cplxpair(1) == 1 + + def test_output_order(self): + xp_assert_close(_cplxpair([1+1j, 1-1j]), [1-1j, 1+1j]) + + a = [1+1j, 1+1j, 1, 1-1j, 1-1j, 2] + b = [1-1j, 1+1j, 1-1j, 1+1j, 1, 2] + xp_assert_close(_cplxpair(a), b) + + # points spaced around the unit circle + z = np.exp(2j*pi*array([4, 3, 5, 2, 6, 1, 0])/7) + z1 = np.copy(z) + np.random.shuffle(z) + xp_assert_close(_cplxpair(z), z1) + np.random.shuffle(z) + xp_assert_close(_cplxpair(z), z1) + np.random.shuffle(z) + xp_assert_close(_cplxpair(z), z1) + + # Should be able to pair up all the conjugates + x = np.random.rand(10000) + 1j * np.random.rand(10000) + y = x.conj() + z = np.random.rand(10000) + x = np.concatenate((x, y, z)) + np.random.shuffle(x) + c = _cplxpair(x) + + # Every other element of head should be conjugates: + xp_assert_close(c[0:20000:2], np.conj(c[1:20000:2])) + # Real parts of head should be in sorted order: + xp_assert_close(c[0:20000:2].real, np.sort(c[0:20000:2].real)) + # Tail should be sorted real numbers: + xp_assert_close(c[20000:], np.sort(c[20000:])) + + def test_real_integer_input(self): + xp_assert_equal(_cplxpair([2, 0, 1]), [0, 1, 2]) + + def test_tolerances(self): + eps = spacing(1) + xp_assert_close(_cplxpair([1j, -1j, 1+1j*eps], tol=2*eps), + [-1j, 1j, 1+1j*eps]) + + # sorting close to 0 + xp_assert_close(_cplxpair([-eps+1j, +eps-1j]), [-1j, +1j]) + xp_assert_close(_cplxpair([+eps+1j, -eps-1j]), [-1j, +1j]) + xp_assert_close(_cplxpair([+1j, -1j]), [-1j, +1j]) + + def test_unmatched_conjugates(self): + # 1+2j is unmatched + assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j]) + + # 1+2j and 1-3j are unmatched + assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j, 1-3j]) + + # 1+3j is unmatched + assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+3j]) + + # Not conjugates + assert_raises(ValueError, _cplxpair, [4+5j, 4+5j]) + assert_raises(ValueError, _cplxpair, [1-7j, 1-7j]) + + # No pairs + assert_raises(ValueError, _cplxpair, [1+3j]) + assert_raises(ValueError, _cplxpair, [1-3j]) + + +class TestCplxReal: + + def test_trivial_input(self): + assert all(x.size == 0 for x in _cplxreal([])) + + x = _cplxreal(1) + assert x[0].size == 0 + xp_assert_equal(x[1], np.asarray([1])) + + + def test_output_order(self): + zc, zr = _cplxreal(np.roots(array([1, 0, 0, 1]))) + xp_assert_close(np.append(zc, zr), [1/2 + 1j*sin(pi/3), -1]) + + eps = spacing(1) + + a = [0+1j, 0-1j, eps + 1j, eps - 1j, -eps + 1j, 
-eps - 1j, + 1, 4, 2, 3, 0, 0, + 2+3j, 2-3j, + 1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, # sorts out of order + 3+1j, 3+1j, 3+1j, 3-1j, 3-1j, 3-1j, + 2-3j, 2+3j] + zc, zr = _cplxreal(a) + xp_assert_close(zc, [1j, 1j, 1j, 1+1j, 1+2j, 2+3j, 2+3j, 3+1j, 3+1j, + 3+1j]) + xp_assert_close(zr, [0.0, 0, 1, 2, 3, 4]) + + z = array([1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, 1+eps+3j, 1-2*eps-3j, + 0+1j, 0-1j, 2+4j, 2-4j, 2+3j, 2-3j, 3+7j, 3-7j, 4-eps+1j, + 4+eps-2j, 4-1j, 4-eps+2j]) + + zc, zr = _cplxreal(z) + xp_assert_close(zc, [1j, 1+1j, 1+2j, 1+3j, 2+3j, 2+4j, 3+7j, 4+1j, + 4+2j]) + xp_assert_equal(zr, np.asarray([])) + + def test_unmatched_conjugates(self): + # 1+2j is unmatched + assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j]) + + # 1+2j and 1-3j are unmatched + assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j, 1-3j]) + + # 1+3j is unmatched + assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+3j]) + + # No pairs + assert_raises(ValueError, _cplxreal, [1+3j]) + assert_raises(ValueError, _cplxreal, [1-3j]) + + def test_real_integer_input(self): + zc, zr = _cplxreal([2, 0, 1, 4]) + xp_assert_equal(zc, []) + xp_assert_equal(zr, [0, 1, 2, 4]) + + +class TestTf2zpk: + + @pytest.mark.parametrize('dt', (np.float64, np.complex128)) + def test_simple(self, dt): + z_r = np.array([0.5, -0.5]) + p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) + # Sort the zeros/poles so that we don't fail the test if the order + # changes + z_r.sort() + p_r.sort() + b = np.poly(z_r).astype(dt) + a = np.poly(p_r).astype(dt) + + z, p, k = tf2zpk(b, a) + z.sort() + # The real part of `p` is ~0.0, so sort by imaginary part + p = p[np.argsort(p.imag)] + + assert_array_almost_equal(z, z_r) + assert_array_almost_equal(p, p_r) + assert_array_almost_equal(k, 1.) + assert k.dtype == dt + + def test_bad_filter(self): + # Regression test for #651: better handling of badly conditioned + # filter coefficients. + with suppress_warnings(): + warnings.simplefilter("error", BadCoefficients) + assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0]) + + +class TestZpk2Tf: + + def test_identity(self): + """Test the identity transfer function.""" + z = [] + p = [] + k = 1. + b, a = zpk2tf(z, p, k) + b_r = np.array([1.]) # desired result + a_r = np.array([1.]) # desired result + # The test for the *type* of the return values is a regression + # test for ticket #1095. In the case p=[], zpk2tf used to + # return the scalar 1.0 instead of array([1.0]). 
+ xp_assert_equal(b, b_r) + assert isinstance(b, np.ndarray) + xp_assert_equal(a, a_r) + assert isinstance(a, np.ndarray) + + +class TestSos2Zpk: + + def test_basic(self): + sos = [[1, 0, 1, 1, 0, -0.81], + [1, 0, 0, 1, 0, +0.49]] + z, p, k = sos2zpk(sos) + z2 = [1j, -1j, 0, 0] + p2 = [0.9, -0.9, 0.7j, -0.7j] + k2 = 1 + assert_array_almost_equal(sort(z), sort(z2), decimal=4) + assert_array_almost_equal(sort(p), sort(p2), decimal=4) + assert_array_almost_equal(k, k2) + + sos = [[1.00000, +0.61803, 1.0000, 1.00000, +0.60515, 0.95873], + [1.00000, -1.61803, 1.0000, 1.00000, -1.58430, 0.95873], + [1.00000, +1.00000, 0.0000, 1.00000, +0.97915, 0.00000]] + z, p, k = sos2zpk(sos) + z2 = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j, + 0.8090 - 0.5878j, -1.0000 + 0.0000j, 0] + p2 = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j, + 0.7922 - 0.5755j, -0.9791 + 0.0000j, 0] + k2 = 1 + assert_array_almost_equal(sort(z), sort(z2), decimal=4) + assert_array_almost_equal(sort(p), sort(p2), decimal=4) + + sos = array([[1, 2, 3, 1, 0.2, 0.3], + [4, 5, 6, 1, 0.4, 0.5]]) + z = array([-1 - 1.41421356237310j, -1 + 1.41421356237310j, + -0.625 - 1.05326872164704j, -0.625 + 1.05326872164704j]) + p = array([-0.2 - 0.678232998312527j, -0.2 + 0.678232998312527j, + -0.1 - 0.538516480713450j, -0.1 + 0.538516480713450j]) + k = 4 + z2, p2, k2 = sos2zpk(sos) + xp_assert_close(_cplxpair(z2), z) + xp_assert_close(_cplxpair(p2), p) + assert k2 == k + + @pytest.mark.thread_unsafe + def test_fewer_zeros(self): + """Test not the expected number of p/z (effectively at origin).""" + sos = butter(3, 0.1, output='sos') + z, p, k = sos2zpk(sos) + assert len(z) == 4 + assert len(p) == 4 + + sos = butter(12, [5., 30.], 'bandpass', fs=1200., analog=False, + output='sos') + with pytest.warns(BadCoefficients, match='Badly conditioned'): + z, p, k = sos2zpk(sos) + assert len(z) == 24 + assert len(p) == 24 + + +class TestSos2Tf: + + def test_basic(self): + sos = [[1, 1, 1, 1, 0, -1], + [-2, 3, 1, 1, 10, 1]] + b, a = sos2tf(sos) + assert_array_almost_equal(b, [-2, 1, 2, 4, 1]) + assert_array_almost_equal(a, [1, 10, 0, -10, -1]) + + +class TestTf2Sos: + + def test_basic(self): + num = [2, 16, 44, 56, 32] + den = [3, 3, -15, 18, -12] + sos = tf2sos(num, den) + sos2 = [[0.6667, 4.0000, 5.3333, 1.0000, +2.0000, -4.0000], + [1.0000, 2.0000, 2.0000, 1.0000, -1.0000, +1.0000]] + assert_array_almost_equal(sos, sos2, decimal=4) + + b = [1, -3, 11, -27, 18] + a = [16, 12, 2, -4, -1] + sos = tf2sos(b, a) + sos2 = [[0.0625, -0.1875, 0.1250, 1.0000, -0.2500, -0.1250], + [1.0000, +0.0000, 9.0000, 1.0000, +1.0000, +0.5000]] + # assert_array_almost_equal(sos, sos2, decimal=4) + + @pytest.mark.parametrize('b, a, analog, sos', + [([1], [1], False, [[1., 0., 0., 1., 0., 0.]]), + ([1], [1], True, [[0., 0., 1., 0., 0., 1.]]), + ([1], [1., 0., -1.01, 0, 0.01], False, + [[1., 0., 0., 1., 0., -0.01], + [1., 0., 0., 1., 0., -1]]), + ([1], [1., 0., -1.01, 0, 0.01], True, + [[0., 0., 1., 1., 0., -1], + [0., 0., 1., 1., 0., -0.01]])]) + def test_analog(self, b, a, analog, sos): + sos2 = tf2sos(b, a, analog=analog) + assert_array_almost_equal(sos, sos2, decimal=4) + + +class TestZpk2Sos: + + @pytest.mark.parametrize('dt', 'fdgFDG') + @pytest.mark.parametrize('pairing, analog', + [('nearest', False), + ('keep_odd', False), + ('minimal', False), + ('minimal', True)]) + def test_dtypes(self, dt, pairing, analog): + z = np.array([-1, -1]).astype(dt) + ct = dt.upper() # the poles have to be complex + p = np.array([0.57149 + 0.29360j, 0.57149 - 
0.29360j]).astype(ct) + k = np.array(1).astype(dt) + sos = zpk2sos(z, p, k, pairing=pairing, analog=analog) + sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]] # octave & MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + def test_basic(self): + for pairing in ('nearest', 'keep_odd'): + # + # Cases that match octave + # + + z = [-1, -1] + p = [0.57149 + 0.29360j, 0.57149 - 0.29360j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]] # octave & MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [1j, -1j] + p = [0.9, -0.9, 0.7j, -0.7j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1, 0, 1, 1, 0, +0.49], + [1, 0, 0, 1, 0, -0.81]] # octave + # sos2 = [[0, 0, 1, 1, -0.9, 0], + # [1, 0, 1, 1, 0.9, 0]] # MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [] + p = [0.8, -0.5+0.25j, -0.5-0.25j] + k = 1. + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1., 0., 0., 1., 1., 0.3125], + [1., 0., 0., 1., -0.8, 0.]] # octave, MATLAB fails + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [1., 1., 0.9j, -0.9j] + p = [0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1, 0, 0.81, 1, -0.2, 0.82], + [1, -2, 1, 1, -1.98, 0.9802]] # octave + # sos2 = [[1, -2, 1, 1, -0.2, 0.82], + # [1, 0, 0.81, 1, -1.98, 0.9802]] # MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [0.9+0.1j, 0.9-0.1j, -0.9] + p = [0.75+0.25j, 0.75-0.25j, 0.9] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + if pairing == 'keep_odd': + sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625], + [1, 0.9, 0, 1, -0.9, 0]] # octave; MATLAB fails + assert_array_almost_equal(sos, sos2, decimal=4) + else: # pairing == 'nearest' + sos2 = [[1, 0.9, 0, 1, -1.5, 0.625], + [1, -1.8, 0.82, 1, -0.9, 0]] # our algorithm + assert_array_almost_equal(sos, sos2, decimal=4) + + # + # Cases that differ from octave: + # + + z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j, + +0.8090 - 0.5878j, -1.0000 + 0.0000j] + p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j, + +0.7922 - 0.5755j, -0.9791 + 0.0000j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + # sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870], + # [1, -1.618, 1, 1, -1.5844, 0.95878], + # [1, 1, 0, 1, 0.9791, 0]] # octave, MATLAB fails + sos2 = [[1, 1, 0, 1, +0.97915, 0], + [1, 0.61803, 1, 1, +0.60515, 0.95873], + [1, -1.61803, 1, 1, -1.58430, 0.95873]] + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [-1 - 1.4142j, -1 + 1.4142j, + -0.625 - 1.0533j, -0.625 + 1.0533j] + p = [-0.2 - 0.6782j, -0.2 + 0.6782j, + -0.1 - 0.5385j, -0.1 + 0.5385j] + k = 4 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[4, 8, 12, 1, 0.2, 0.3], + [1, 1.25, 1.5, 1, 0.4, 0.5]] # MATLAB + # sos2 = [[4, 8, 12, 1, 0.4, 0.5], + # [1, 1.25, 1.5, 1, 0.2, 0.3]] # octave + xp_assert_close(sos, sos2, rtol=1e-4, atol=1e-4) + + z = [] + p = [0.2, -0.5+0.25j, -0.5-0.25j] + k = 1. + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1., 0., 0., 1., -0.2, 0.], + [1., 0., 0., 1., 1., 0.3125]] + # sos2 = [[1., 0., 0., 1., 1., 0.3125], + # [1., 0., 0., 1., -0.2, 0]] # octave, MATLAB fails + assert_array_almost_equal(sos, sos2, decimal=4) + + # The next two examples are adapted from Leland B. 
Jackson, + # "Digital Filters and Signal Processing (1995) p.400: + # http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false + + deg2rad = np.pi / 180. + k = 1. + + # first example + thetas = [22.5, 45, 77.5] + mags = [0.8, 0.6, 0.9] + z = np.array([np.exp(theta * deg2rad * 1j) for theta in thetas]) + z = np.concatenate((z, np.conj(z))) + p = np.array([mag * np.exp(theta * deg2rad * 1j) + for theta, mag in zip(thetas, mags)]) + p = np.concatenate((p, np.conj(p))) + sos = zpk2sos(z, p, k) + # sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81], # octave, + # [1, -1.41421, 1, 1, -0.84853, 0.36], # MATLAB fails + # [1, -1.84776, 1, 1, -1.47821, 0.64]] + # Note that pole-zero pairing matches, but ordering is different + sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36], + [1, -1.84776, 1, 1, -1.47821, 0.64], + [1, -0.43288, 1, 1, -0.38959, 0.81]] + assert_array_almost_equal(sos, sos2, decimal=4) + + # second example + z = np.array([np.exp(theta * deg2rad * 1j) + for theta in (85., 10.)]) + z = np.concatenate((z, np.conj(z), [1, -1])) + sos = zpk2sos(z, p, k) + + # sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81], # octave "wrong", + # [1, -1.96962, 1, 1, -0.84853, 0.36], # MATLAB fails + # [1, 0, -1, 1, -1.47821, 0.64000]] + # Our pole-zero pairing matches the text, Octave does not + sos2 = [[1, 0, -1, 1, -0.84853, 0.36], + [1, -1.96962, 1, 1, -1.47821, 0.64], + [1, -0.17431, 1, 1, -0.38959, 0.81]] + assert_array_almost_equal(sos, sos2, decimal=4) + + # these examples are taken from the doc string, and show the + # effect of the 'pairing' argument + @pytest.mark.parametrize('pairing, sos', + [('nearest', + np.array([[1., 1., 0.5, 1., -0.75, 0.], + [1., 1., 0., 1., -1.6, 0.65]])), + ('keep_odd', + np.array([[1., 1., 0, 1., -0.75, 0.], + [1., 1., 0.5, 1., -1.6, 0.65]])), + ('minimal', + np.array([[0., 1., 1., 0., 1., -0.75], + [1., 1., 0.5, 1., -1.6, 0.65]]))]) + def test_pairing(self, pairing, sos): + z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j]) + p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j]) + sos2 = zpk2sos(z1, p1, 1, pairing=pairing) + assert_array_almost_equal(sos, sos2, decimal=4) + + @pytest.mark.parametrize('p, sos_dt', + [([-1, 1, -0.1, 0.1], + [[0., 0., 1., 1., 0., -0.01], + [0., 0., 1., 1., 0., -1]]), + ([-0.7071+0.7071j, -0.7071-0.7071j, -0.1j, 0.1j], + [[0., 0., 1., 1., 0., 0.01], + [0., 0., 1., 1., 1.4142, 1.]])]) + def test_analog(self, p, sos_dt): + # test `analog` argument + # for discrete time, poles closest to unit circle should appear last + # for cont. 
time, poles closest to imaginary axis should appear last + sos2_dt = zpk2sos([], p, 1, pairing='minimal', analog=False) + sos2_ct = zpk2sos([], p, 1, pairing='minimal', analog=True) + assert_array_almost_equal(sos_dt, sos2_dt, decimal=4) + assert_array_almost_equal(sos_dt[::-1], sos2_ct, decimal=4) + + def test_bad_args(self): + with pytest.raises(ValueError, match=r'pairing must be one of'): + zpk2sos([1], [2], 1, pairing='no_such_pairing') + + with pytest.raises(ValueError, match=r'.*pairing must be "minimal"'): + zpk2sos([1], [2], 1, pairing='keep_odd', analog=True) + + with pytest.raises(ValueError, + match=r'.*must have len\(p\)>=len\(z\)'): + zpk2sos([1, 1], [2], 1, analog=True) + + with pytest.raises(ValueError, match=r'k must be real'): + zpk2sos([1], [2], k=1j) + + +class TestFreqs: + + def test_basic(self): + _, h = freqs([1.0], [1.0], worN=8) + assert_array_almost_equal(h, np.ones(8)) + + def test_output(self): + # 1st order low-pass filter: H(s) = 1 / (s + 1) + w = [0.1, 1, 10, 100] + num = [1] + den = [1, 1] + w, H = freqs(num, den, worN=w) + s = w * 1j + expected = 1 / (s + 1) + assert_array_almost_equal(H.real, expected.real) + assert_array_almost_equal(H.imag, expected.imag) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + # Expected range is from 0.01 to 10. + num = [1] + den = [1, 1] + n = 10 + expected_w = np.logspace(-2, 1, n) + w, H = freqs(num, den, worN=n) + assert_array_almost_equal(w, expected_w) + + def test_plot(self): + + def plot(w, h): + assert_array_almost_equal(h, np.ones(8)) + + assert_raises(ZeroDivisionError, freqs, [1.0], [1.0], worN=8, + plot=lambda w, h: 1 / 0) + freqs([1.0], [1.0], worN=8, plot=plot) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqs([1.0], [1.0]) + w2, h2 = freqs([1.0], [1.0], None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + w, h = freqs([1.0], [1.0], worN=N) + assert len(w) == 8 + assert_array_almost_equal(h, np.ones(8)) + + # Measure at frequency 8 rad/sec + for w in (8.0, 8.0+0j): + w_out, h = freqs([1.0], [1.0], worN=w) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + +class TestFreqs_zpk: + + def test_basic(self): + _, h = freqs_zpk([1.0], [1.0], [1.0], worN=8) + assert_array_almost_equal(h, np.ones(8)) + + def test_output(self): + # 1st order low-pass filter: H(s) = 1 / (s + 1) + w = [0.1, 1, 10, 100] + z = [] + p = [-1] + k = 1 + w, H = freqs_zpk(z, p, k, worN=w) + s = w * 1j + expected = 1 / (s + 1) + assert_array_almost_equal(H.real, expected.real) + assert_array_almost_equal(H.imag, expected.imag) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + # Expected range is from 0.01 to 10. 
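+        # (Assumption: with an integer worN, freqs_zpk builds its grid via findfreqs, and the
+        #  lone pole at s = -1 makes that grid span the decades around |s| = 1, hence ~0.01..10 rad/s.)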
+ z = [] + p = [-1] + k = 1 + n = 10 + expected_w = np.logspace(-2, 1, n) + w, H = freqs_zpk(z, p, k, worN=n) + assert_array_almost_equal(w, expected_w) + + def test_vs_freqs(self): + b, a = cheby1(4, 5, 100, analog=True, output='ba') + z, p, k = cheby1(4, 5, 100, analog=True, output='zpk') + + w1, h1 = freqs(b, a) + w2, h2 = freqs_zpk(z, p, k) + xp_assert_close(w1, w2) + xp_assert_close(h1, h2, rtol=1e-6) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqs_zpk([1.0], [1.0], [1.0]) + w2, h2 = freqs_zpk([1.0], [1.0], [1.0], None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + w, h = freqs_zpk([], [], 1, worN=N) + assert len(w) == 8 + assert_array_almost_equal(h, np.ones(8)) + + # Measure at frequency 8 rad/sec + for w in (8.0, 8.0+0j): + w_out, h = freqs_zpk([], [], 1, worN=w) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + +class TestFreqz: + + def test_ticket1441(self): + """Regression test for ticket 1441.""" + # Because freqz previously used arange instead of linspace, + # when N was large, it would return one more point than + # requested. + N = 100000 + w, h = freqz([1.0], worN=N) + assert w.shape == (N,) + + def test_basic(self): + w, h = freqz([1.0], worN=8) + assert_array_almost_equal(w, np.pi * np.arange(8) / 8.) + assert_array_almost_equal(h, np.ones(8)) + w, h = freqz([1.0], worN=9) + assert_array_almost_equal(w, np.pi * np.arange(9) / 9.) + assert_array_almost_equal(h, np.ones(9)) + + for a in [1, np.ones(2)]: + w, h = freqz(np.ones(2), a, worN=0) + assert w.shape == (0,) + assert h.shape == (0,) + assert h.dtype == np.dtype('complex128') + + t = np.linspace(0, 1, 4, endpoint=False) + for b, a, h_whole in zip( + ([1., 0, 0, 0], np.sin(2 * np.pi * t)), + ([1., 0, 0, 0], [0.5, 0, 0, 0]), + ([1., 1., 1., 1.], [0, -4j, 0, 4j])): + w, h = freqz(b, a, worN=4, whole=True) + expected_w = np.linspace(0, 2 * np.pi, 4, endpoint=False) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, h_whole) + # simultaneously check int-like support + w, h = freqz(b, a, worN=np.int32(4), whole=True) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, h_whole) + w, h = freqz(b, a, worN=w, whole=True) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, h_whole) + + def test_basic_whole(self): + w, h = freqz([1.0], worN=8, whole=True) + assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + def test_plot(self): + + def plot(w, h): + assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + assert_raises(ZeroDivisionError, freqz, [1.0], worN=8, + plot=lambda w, h: 1 / 0) + freqz([1.0], worN=8, plot=plot) + + def test_fft_wrapping(self): + # Some simple real FIR filters + bs = list() # filters + as_ = list() + hs_whole = list() + hs_half = list() + # 3 taps + t = np.linspace(0, 1, 3, endpoint=False) + bs.append(np.sin(2 * np.pi * t)) + as_.append(3.) 
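+        # (Note: b is one period of a sine, whose 3-point DFT is [0, -1.5j, +1.5j]; dividing by the
+        #  constant denominator a = 3 gives the whole-circle response [0, -0.5j, +0.5j] appended below.)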
+ hs_whole.append([0, -0.5j, 0.5j]) + hs_half.append([0, np.sqrt(1./12.), -0.5j]) + # 4 taps + t = np.linspace(0, 1, 4, endpoint=False) + bs.append(np.sin(2 * np.pi * t)) + as_.append(0.5) + hs_whole.append([0, -4j, 0, 4j]) + hs_half.append([0, np.sqrt(8), -4j, -np.sqrt(8)]) + del t + for ii, b in enumerate(bs): + # whole + a = as_[ii] + expected_w = np.linspace(0, 2 * np.pi, len(b), endpoint=False) + w, h = freqz(b, a, worN=expected_w, whole=True) # polyval + err_msg = f'b = {b}, a={a}' + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) + w, h = freqz(b, a, worN=len(b), whole=True) # FFT + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) + # non-whole + expected_w = np.linspace(0, np.pi, len(b), endpoint=False) + w, h = freqz(b, a, worN=expected_w, whole=False) # polyval + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) + w, h = freqz(b, a, worN=len(b), whole=False) # FFT + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) + + # some random FIR filters (real + complex) + # assume polyval is accurate + rng = np.random.RandomState(0) + for ii in range(2, 10): # number of taps + b = rng.randn(ii) + for kk in range(2): + a = rng.randn(1) if kk == 0 else rng.randn(3) + for jj in range(2): + if jj == 1: + b = b + rng.randn(ii) * 1j + # whole + expected_w = np.linspace(0, 2 * np.pi, ii, endpoint=False) + w, expected_h = freqz(b, a, worN=expected_w, whole=True) + assert_array_almost_equal(w, expected_w) + w, h = freqz(b, a, worN=ii, whole=True) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, expected_h) + # half + expected_w = np.linspace(0, np.pi, ii, endpoint=False) + w, expected_h = freqz(b, a, worN=expected_w, whole=False) + assert_array_almost_equal(w, expected_w) + w, h = freqz(b, a, worN=ii, whole=False) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, expected_h) + + def test_broadcasting1(self): + # Test broadcasting with worN an integer or a 1-D array, + # b and a are n-dimensional arrays. + np.random.seed(123) + b = np.random.rand(3, 5, 1) + a = np.random.rand(2, 1) + for whole in [False, True]: + # Test with worN being integers (one fast for FFT and one not), + # a 1-D array, and an empty array. + for worN in [16, 17, np.linspace(0, 1, 10), np.array([])]: + w, h = freqz(b, a, worN=worN, whole=whole) + for k in range(b.shape[1]): + bk = b[:, k, 0] + ak = a[:, 0] + ww, hh = freqz(bk, ak, worN=worN, whole=whole) + xp_assert_close(ww, w) + xp_assert_close(hh, h[k]) + + def test_broadcasting2(self): + # Test broadcasting with worN an integer or a 1-D array, + # b is an n-dimensional array, and a is left at the default value. + np.random.seed(123) + b = np.random.rand(3, 5, 1) + for whole in [False, True]: + for worN in [16, 17, np.linspace(0, 1, 10)]: + w, h = freqz(b, worN=worN, whole=whole) + for k in range(b.shape[1]): + bk = b[:, k, 0] + ww, hh = freqz(bk, worN=worN, whole=whole) + xp_assert_close(ww, w) + xp_assert_close(hh, h[k]) + + def test_broadcasting3(self): + # Test broadcasting where b.shape[-1] is the same length + # as worN, and a is left at the default value. 
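+        # (Here b.shape[1:] broadcasts against worN, so column k of b is expected to be evaluated
+        #  only at the single frequency worN[k] -- that is what the per-column check below verifies.)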
+ np.random.seed(123) + N = 16 + b = np.random.rand(3, N) + for whole in [False, True]: + for worN in [N, np.linspace(0, 1, N)]: + w, h = freqz(b, worN=worN, whole=whole) + assert w.size == N + for k in range(N): + bk = b[:, k] + ww, hh = freqz(bk, worN=w[k], whole=whole) + xp_assert_close(ww, np.asarray(w[k])[None]) + xp_assert_close(hh, np.asarray(h[k])[None]) + + def test_broadcasting4(self): + # Test broadcasting with worN a 2-D array. + np.random.seed(123) + b = np.random.rand(4, 2, 1, 1) + a = np.random.rand(5, 2, 1, 1) + for whole in [False, True]: + for worN in [np.random.rand(6, 7), np.empty((6, 0))]: + w, h = freqz(b, a, worN=worN, whole=whole) + xp_assert_close(w, worN, rtol=1e-14) + assert h.shape == (2,) + worN.shape + for k in range(2): + ww, hh = freqz(b[:, k, 0, 0], a[:, k, 0, 0], + worN=worN.ravel(), + whole=whole) + xp_assert_close(ww, worN.ravel(), rtol=1e-14) + xp_assert_close(hh, h[k, :, :].ravel()) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqz([1.0], 1) + w2, h2 = freqz([1.0], 1, None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_fs_param(self): + fs = 900 + b = [0.039479155677484369, 0.11843746703245311, 0.11843746703245311, + 0.039479155677484369] + a = [1.0, -1.3199152021838287, 0.80341991081938424, + -0.16767146321568049] + + # N = None, whole=False + w1, h1 = freqz(b, a, fs=fs) + w2, h2 = freqz(b, a) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 512, endpoint=False)) + + # N = None, whole=True + w1, h1 = freqz(b, a, whole=True, fs=fs) + w2, h2 = freqz(b, a, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 512, endpoint=False)) + + # N = 5, whole=False + w1, h1 = freqz(b, a, 5, fs=fs) + w2, h2 = freqz(b, a, 5) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 5, endpoint=False)) + + # N = 5, whole=True + w1, h1 = freqz(b, a, 5, whole=True, fs=fs) + w2, h2 = freqz(b, a, 5, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 5, endpoint=False)) + + # w is an array_like + for w in ([123], (123,), np.array([123]), (50, 123, 230), + np.array([50, 123, 230])): + w1, h1 = freqz(b, a, w, fs=fs) + w2, h2 = freqz(b, a, 2*pi*np.array(w)/fs) + xp_assert_close(h1, h2) + xp_assert_close(w, w1, check_dtype=False) + + def test_w_or_N_types(self): + # Measure at 7 (polyval) or 8 (fft) equally-spaced points + for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7), + np.array(7), + 8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + + w, h = freqz([1.0], worN=N) + assert_array_almost_equal(w, np.pi * np.arange(N) / N) + assert_array_almost_equal(h, np.ones(N)) + + w, h = freqz([1.0], worN=N, fs=100) + assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False)) + assert_array_almost_equal(h, np.ones(N)) + + # Measure at frequency 8 Hz + for w in (8.0, 8.0+0j): + # Only makes sense when fs is specified + w_out, h = freqz([1.0], worN=w, fs=100) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + def test_nyquist(self): + w, h = freqz([1.0], worN=8, include_nyquist=True) + assert_array_almost_equal(w, np.pi * np.arange(8) / 7.) + assert_array_almost_equal(h, np.ones(8)) + w, h = freqz([1.0], worN=9, include_nyquist=True) + assert_array_almost_equal(w, np.pi * np.arange(9) / 8.) 
+ assert_array_almost_equal(h, np.ones(9)) + + for a in [1, np.ones(2)]: + w, h = freqz(np.ones(2), a, worN=0, include_nyquist=True) + assert w.shape == (0,) + assert h.shape == (0,) + assert h.dtype == np.dtype('complex128') + + w1, h1 = freqz([1.0], worN=8, whole = True, include_nyquist=True) + w2, h2 = freqz([1.0], worN=8, whole = True, include_nyquist=False) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + # https://github.com/scipy/scipy/issues/17289 + # https://github.com/scipy/scipy/issues/15273 + @pytest.mark.parametrize('whole,nyquist,worN', + [(False, False, 32), + (False, True, 32), + (True, False, 32), + (True, True, 32), + (False, False, 257), + (False, True, 257), + (True, False, 257), + (True, True, 257)]) + def test_17289(self, whole, nyquist, worN): + d = [0, 1] + w, Drfft = freqz(d, worN=32, whole=whole, include_nyquist=nyquist) + _, Dpoly = freqz(d, worN=w) + xp_assert_close(Drfft, Dpoly) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + freqz([1.0], fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none."): + freqz([1.0], fs=None) + + +class Testfreqz_sos: + + def test_freqz_sos_basic(self): + # Compare the results of freqz and freqz_sos for a low order + # Butterworth filter. + + N = 500 + + b, a = butter(4, 0.2) + sos = butter(4, 0.2, output='sos') + w, h = freqz(b, a, worN=N) + w2, h2 = freqz_sos(sos, worN=N) + xp_assert_equal(w2, w) + xp_assert_close(h2, h, rtol=1e-10, atol=1e-14) + + b, a = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass') + sos = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass', output='sos') + w, h = freqz(b, a, worN=N) + w2, h2 = freqz_sos(sos, worN=N) + xp_assert_equal(w2, w) + xp_assert_close(h2, h, rtol=1e-10, atol=1e-14) + # must have at least one section + assert_raises(ValueError, freqz_sos, sos[:0]) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + N = 500 + + sos = butter(4, 0.2, output='sos') + w1, h1 = freqz_sos(sos, worN=N) + w2, h2 = sosfreqz(sos, worN=N) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_freqz_sos_design(self): + # Compare freqz_sos output against expected values for different + # filter types + + # from cheb2ord + N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) + sos = cheby2(N, 60, Wn, 'stop', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + xp_assert_close(20 * np.log10(h[w <= 0.1]), np.asarray(0.), atol=3.01, + check_shape=False) + xp_assert_close(20 * np.log10(h[w >= 0.6]), np.asarray(0.), atol=3.01, + check_shape=False) + xp_assert_close(h[(w >= 0.2) & (w <= 0.5)], + np.asarray(0.), atol=1e-3, + check_shape=False) # <= -60 dB + + N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 150) + sos = cheby2(N, 150, Wn, 'stop', output='sos') + w, h = freqz_sos(sos) + dB = 20*np.log10(np.abs(h)) + w /= np.pi + xp_assert_close(dB[w <= 0.1], np.asarray(0.0), atol=3.01, check_shape=False) + xp_assert_close(dB[w >= 0.6], np.asarray(0.0), atol=3.01, check_shape=False) + assert np.all(dB[(w >= 0.2) & (w <= 0.5)] < -149.9) + + # from cheb1ord + N, Wn = cheb1ord(0.2, 0.3, 3, 40) + sos = cheby1(N, 3, Wn, 'low', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + xp_assert_close(20 * np.log10(h[w <= 0.2]), np.asarray(0.0), atol=3.01, + check_shape=False) + xp_assert_close(h[w >= 0.3], np.asarray(0.0), atol=1e-2, + check_shape=False) # <= -40 dB + + N, Wn = cheb1ord(0.2, 0.3, 1, 150) + sos = cheby1(N, 1, Wn, 
'low', output='sos') + w, h = freqz_sos(sos) + dB = 20*np.log10(np.abs(h)) + w /= np.pi + xp_assert_close(dB[w <= 0.2], np.asarray(0.0), atol=1.01, + check_shape=False) + assert np.all(dB[w >= 0.3] < -149.9) + + # adapted from ellipord + N, Wn = ellipord(0.3, 0.2, 3, 60) + sos = ellip(N, 0.3, 60, Wn, 'high', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + xp_assert_close(20 * np.log10(h[w >= 0.3]), np.asarray(0.0), atol=3.01, + check_shape=False) + xp_assert_close(h[w <= 0.1], np.asarray(0.0), atol=1.5e-3, + check_shape=False) # <= -60 dB (approx) + + # adapted from buttord + N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 40) + sos = butter(N, Wn, 'band', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + + h014 = h[w <= 0.14] + xp_assert_close(h014, np.zeros_like(h014), atol=1e-2) # <= -40 dB + h06 = h[w >= 0.6] + xp_assert_close(h06, np.zeros_like(h06), atol=1e-2) # <= -40 dB + h0205 = 20 * np.log10(h[(w >= 0.2) & (w <= 0.5)]) + xp_assert_close(h0205, np.zeros_like(h0205), atol=3.01) + + N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 100) + sos = butter(N, Wn, 'band', output='sos') + w, h = freqz_sos(sos) + dB = 20*np.log10(np.maximum(np.abs(h), 1e-10)) + w /= np.pi + + assert np.all(dB[(w > 0) & (w <= 0.14)] < -99.9) + assert np.all(dB[w >= 0.6] < -99.9) + db0205 = dB[(w >= 0.2) & (w <= 0.5)] + xp_assert_close(db0205, np.zeros_like(db0205), atol=3.01) + + def test_freqz_sos_design_ellip(self): + N, Wn = ellipord(0.3, 0.1, 3, 60) + sos = ellip(N, 0.3, 60, Wn, 'high', output='sos') + w, h = freqz_sos(sos) + h = np.abs(h) + w /= np.pi + + h03 = 20 * np.log10(h[w >= 0.3]) + xp_assert_close(h03, np.zeros_like(h03), atol=3.01) + h01 = h[w <= 0.1] + xp_assert_close(h01, np.zeros_like(h01), atol=1.5e-3) # <= -60 dB (approx) + + N, Wn = ellipord(0.3, 0.2, .5, 150) + sos = ellip(N, .5, 150, Wn, 'high', output='sos') + w, h = freqz_sos(sos) + dB = 20*np.log10(np.maximum(np.abs(h), 1e-10)) + w /= np.pi + + db03 = dB[w >= 0.3] + xp_assert_close(db03, np.zeros_like(db03), atol=.55) + # Allow some numerical slop in the upper bound -150, so this is + # a check that dB[w <= 0.2] is less than or almost equal to -150. + assert dB[w <= 0.2].max() < -150*(1 - 1e-12) + + @mpmath_check("0.10") + def test_freqz_sos_against_mp(self): + # Compare the result of freqz_sos applied to a high order Butterworth + # filter against the result computed using mpmath. (signal.freqz fails + # miserably with such high order filters.) + from . 
import mpsig + N = 500 + order = 25 + Wn = 0.15 + with mpmath.workdps(80): + z_mp, p_mp, k_mp = mpsig.butter_lp(order, Wn) + w_mp, h_mp = mpsig.zpkfreqz(z_mp, p_mp, k_mp, N) + w_mp = np.array([float(x) for x in w_mp]) + h_mp = np.array([complex(x) for x in h_mp]) + + sos = butter(order, Wn, output='sos') + w, h = freqz_sos(sos, worN=N) + xp_assert_close(w, w_mp, rtol=1e-12, atol=1e-14) + xp_assert_close(h, h_mp, rtol=1e-12, atol=1e-14) + + def test_fs_param(self): + fs = 900 + sos = [[0.03934683014103762, 0.07869366028207524, 0.03934683014103762, + 1.0, -0.37256600288916636, 0.0], + [1.0, 1.0, 0.0, 1.0, -0.9495739996946778, 0.45125966317124144]] + + # N = None, whole=False + w1, h1 = freqz_sos(sos, fs=fs) + w2, h2 = freqz_sos(sos) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 512, endpoint=False)) + + # N = None, whole=True + w1, h1 = freqz_sos(sos, whole=True, fs=fs) + w2, h2 = freqz_sos(sos, whole=True) + xp_assert_close(h1, h2, atol=1e-27) + xp_assert_close(w1, np.linspace(0, fs, 512, endpoint=False)) + + # N = 5, whole=False + w1, h1 = freqz_sos(sos, 5, fs=fs) + w2, h2 = freqz_sos(sos, 5) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 5, endpoint=False)) + + # N = 5, whole=True + w1, h1 = freqz_sos(sos, 5, whole=True, fs=fs) + w2, h2 = freqz_sos(sos, 5, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 5, endpoint=False)) + + # w is an array_like + for w in ([123], (123,), np.array([123]), (50, 123, 230), + np.array([50, 123, 230])): + w1, h1 = freqz_sos(sos, w, fs=fs) + w2, h2 = freqz_sos(sos, 2*pi*np.array(w)/fs) + xp_assert_close(h1, h2) + xp_assert_close(w, w1, check_dtype=False) + + def test_w_or_N_types(self): + # Measure at 7 (polyval) or 8 (fft) equally-spaced points + for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7), + np.array(7), + 8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + + w, h = freqz_sos([1, 0, 0, 1, 0, 0], worN=N) + assert_array_almost_equal(w, np.pi * np.arange(N) / N) + assert_array_almost_equal(h, np.ones(N)) + + w, h = freqz_sos([1, 0, 0, 1, 0, 0], worN=N, fs=100) + assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False)) + assert_array_almost_equal(h, np.ones(N)) + + # Measure at frequency 8 Hz + for w in (8.0, 8.0+0j): + # Only makes sense when fs is specified + w_out, h = freqz_sos([1, 0, 0, 1, 0, 0], worN=w, fs=100) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + def test_fs_validation(self): + sos = butter(4, 0.2, output='sos') + with pytest.raises(ValueError, match="Sampling.*single scalar"): + freqz_sos(sos, fs=np.array([10, 20])) + + +class TestFreqz_zpk: + + def test_ticket1441(self): + """Regression test for ticket 1441.""" + # Because freqz previously used arange instead of linspace, + # when N was large, it would return one more point than + # requested. 
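+        # (With an arange-based grid the floating-point step can overshoot and yield an extra point
+        #  for large N; linspace(0, pi, N, endpoint=False) always returns exactly N samples.)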
+ N = 100000 + w, h = freqz_zpk([0.5], [0.5], 1.0, worN=N) + assert w.shape == (N,) + + def test_basic(self): + w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8) + assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + def test_basic_whole(self): + w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8, whole=True) + assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + def test_vs_freqz(self): + b, a = cheby1(4, 5, 0.5, analog=False, output='ba') + z, p, k = cheby1(4, 5, 0.5, analog=False, output='zpk') + + w1, h1 = freqz(b, a) + w2, h2 = freqz_zpk(z, p, k) + xp_assert_close(w1, w2) + xp_assert_close(h1, h2, rtol=1e-6) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqz_zpk([0.5], [0.5], 1.0) + w2, h2 = freqz_zpk([0.5], [0.5], 1.0, None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_fs_param(self): + fs = 900 + z = [-1, -1, -1] + p = [0.4747869998473389+0.4752230717749344j, 0.37256600288916636, + 0.4747869998473389-0.4752230717749344j] + k = 0.03934683014103762 + + # N = None, whole=False + w1, h1 = freqz_zpk(z, p, k, whole=False, fs=fs) + w2, h2 = freqz_zpk(z, p, k, whole=False) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 512, endpoint=False)) + + # N = None, whole=True + w1, h1 = freqz_zpk(z, p, k, whole=True, fs=fs) + w2, h2 = freqz_zpk(z, p, k, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 512, endpoint=False)) + + # N = 5, whole=False + w1, h1 = freqz_zpk(z, p, k, 5, fs=fs) + w2, h2 = freqz_zpk(z, p, k, 5) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs/2, 5, endpoint=False)) + + # N = 5, whole=True + w1, h1 = freqz_zpk(z, p, k, 5, whole=True, fs=fs) + w2, h2 = freqz_zpk(z, p, k, 5, whole=True) + xp_assert_close(h1, h2) + xp_assert_close(w1, np.linspace(0, fs, 5, endpoint=False)) + + # w is an array_like + for w in ([123], (123,), np.array([123]), (50, 123, 230), + np.array([50, 123, 230])): + w1, h1 = freqz_zpk(z, p, k, w, fs=fs) + w2, h2 = freqz_zpk(z, p, k, 2*pi*np.array(w)/fs) + xp_assert_close(h1, h2) + xp_assert_close(w, w1, check_dtype=False) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + + w, h = freqz_zpk([], [], 1, worN=N) + assert_array_almost_equal(w, np.pi * np.arange(8) / 8.) + assert_array_almost_equal(h, np.ones(8)) + + w, h = freqz_zpk([], [], 1, worN=N, fs=100) + assert_array_almost_equal(w, np.linspace(0, 50, 8, endpoint=False)) + assert_array_almost_equal(h, np.ones(8)) + + # Measure at frequency 8 Hz + for w in (8.0, 8.0+0j): + # Only makes sense when fs is specified + w_out, h = freqz_zpk([], [], 1, worN=w, fs=100) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + freqz_zpk([1.0], [1.0], [1.0], fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none."): + freqz_zpk([1.0], [1.0], [1.0], fs=None) + + +class TestNormalize: + + def test_allclose(self): + """Test for false positive on allclose in normalize() in + filter_design.py""" + # Test to make sure the allclose call within signal.normalize does not + # choose false positives. Then check against a known output from MATLAB + # to make sure the fix doesn't break anything. 
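+        # (For reference: normalize(b, a) divides both polynomials by a[0] and trims leading
+        #  numerator coefficients that are effectively zero -- the allclose call guards that trimming.)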
+ + # These are the coefficients returned from + # `[b,a] = cheby1(8, 0.5, 0.048)' + # in MATLAB. There are at least 15 significant figures in each + # coefficient, so it makes sense to test for errors on the order of + # 1e-13 (this can always be relaxed if different platforms have + # different rounding errors) + b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10, + 6.022052805239190e-10, 1.204410561047838e-09, + 1.505513201309798e-09, 1.204410561047838e-09, + 6.022052805239190e-10, 1.720586515782626e-10, + 2.150733144728282e-11]) + a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00, + 2.654354569747454e+01, -5.182182531666387e+01, + 6.334127355102684e+01, -4.963358186631157e+01, + 2.434862182949389e+01, -6.836925348604676e+00, + 8.412934944449140e-01]) + + # This is the input to signal.normalize after passing through the + # equivalent steps in signal.iirfilter as was done for MATLAB + b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05, + 4.3520780422820447e-05, 8.7041560845640893e-05, + 1.0880195105705122e-04, 8.7041560845640975e-05, + 4.3520780422820447e-05, 1.2434508692234413e-05, + 1.5543135865293012e-06]) + a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05, + 1.9182761917308895e+06, -3.7451128364682454e+06, + 4.5776121393762771e+06, -3.5869706138592605e+06, + 1.7596511818472347e+06, -4.9409793515707983e+05, + 6.0799461347219651e+04]) + + b_output, a_output = normalize(b_norm_in, a_norm_in) + + # The test on b works for decimal=14 but the one for a does not. For + # the sake of consistency, both of these are decimal=13. If something + # breaks on another platform, it is probably fine to relax this lower. + assert_array_almost_equal(b_matlab, b_output, decimal=13) + assert_array_almost_equal(a_matlab, a_output, decimal=13) + + def test_errors(self): + """Test the error cases.""" + # all zero denominator + assert_raises(ValueError, normalize, [1, 2], 0) + + # denominator not 1 dimensional + assert_raises(ValueError, normalize, [1, 2], [[1]]) + + # numerator too many dimensions + assert_raises(ValueError, normalize, [[[1, 2]]], 1) + + +class TestLp2lp: + + def test_basic(self): + b = [1] + a = [1, np.sqrt(2), 1] + b_lp, a_lp = lp2lp(b, a, 0.38574256627112119) + assert_array_almost_equal(b_lp, [0.1488], decimal=4) + assert_array_almost_equal(a_lp, [1, 0.5455, 0.1488], decimal=4) + + +class TestLp2hp: + + def test_basic(self): + b = [0.25059432325190018] + a = [1, 0.59724041654134863, 0.92834805757524175, 0.25059432325190018] + b_hp, a_hp = lp2hp(b, a, 2*np.pi*5000) + xp_assert_close(b_hp, [1.0, 0, 0, 0]) + xp_assert_close(a_hp, [1, 1.1638e5, 2.3522e9, 1.2373e14], rtol=1e-4) + + +class TestLp2bp: + + def test_basic(self): + b = [1] + a = [1, 2, 2, 1] + b_bp, a_bp = lp2bp(b, a, 2*np.pi*4000, 2*np.pi*2000) + xp_assert_close(b_bp, [1.9844e12, 0, 0, 0], rtol=1e-6) + xp_assert_close(a_bp, [1, 2.5133e4, 2.2108e9, 3.3735e13, + 1.3965e18, 1.0028e22, 2.5202e26], rtol=1e-4) + + +class TestLp2bs: + + def test_basic(self): + b = [1] + a = [1, 1] + b_bs, a_bs = lp2bs(b, a, 0.41722257286366754, 0.18460575326152251) + assert_array_almost_equal(b_bs, [1, 0, 0.17407], decimal=5) + assert_array_almost_equal(a_bs, [1, 0.18461, 0.17407], decimal=5) + + +class TestBilinear: + + def test_basic(self): + b = [0.14879732743343033] + a = [1, 0.54552236880522209, 0.14879732743343033] + b_z, a_z = bilinear(b, a, 0.5) + assert_array_almost_equal(b_z, [0.087821, 0.17564, 0.087821], + decimal=5) + assert_array_almost_equal(a_z, [1, -1.0048, 0.35606], 
decimal=4) + + b = [1, 0, 0.17407467530697837] + a = [1, 0.18460575326152251, 0.17407467530697837] + b_z, a_z = bilinear(b, a, 0.5) + assert_array_almost_equal(b_z, [0.86413, -1.2158, 0.86413], + decimal=4) + assert_array_almost_equal(a_z, [1, -1.2158, 0.72826], + decimal=4) + + def test_fs_validation(self): + b = [0.14879732743343033] + a = [1, 0.54552236880522209, 0.14879732743343033] + with pytest.raises(ValueError, match="Sampling.*single scalar"): + bilinear(b, a, fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none"): + bilinear(b, a, fs=None) + + +class TestLp2lp_zpk: + + def test_basic(self): + z = [] + p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)] + k = 1 + z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 5) + xp_assert_equal(z_lp, []) + xp_assert_close(sort(p_lp), sort(p)*5) + xp_assert_close(k_lp, 25.) + + # Pseudo-Chebyshev with both poles and zeros + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 20) + xp_assert_close(sort(z_lp), sort([-40j, +40j])) + xp_assert_close(sort(p_lp), sort([-15, -10-10j, -10+10j])) + xp_assert_close(k_lp, 60.) + + def test_fs_validation(self): + z = [-2j, +2j] + p = [-0.75, -0.5 - 0.5j, -0.5 + 0.5j] + k = 3 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + bilinear_zpk(z, p, k, fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none"): + bilinear_zpk(z, p, k, fs=None) + + +class TestLp2hp_zpk: + + def test_basic(self): + z = [] + p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)] + k = 1 + + z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 5) + xp_assert_equal(z_hp, np.asarray([0.0, 0.0])) + xp_assert_close(sort(p_hp), sort(p)*5) + xp_assert_close(k_hp, 1.0) + + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 6) + xp_assert_close(sort(z_hp), sort([-3j, 0, +3j])) + xp_assert_close(sort(p_hp), sort([-8, -6-6j, -6+6j])) + xp_assert_close(k_hp, 32.0) + + +class TestLp2bp_zpk: + + def test_basic(self): + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + z_bp, p_bp, k_bp = lp2bp_zpk(z, p, k, 15, 8) + xp_assert_close(sort(z_bp), sort([-25j, -9j, 0, +9j, +25j])) + xp_assert_close(sort(p_bp), sort([-3 + 6j*sqrt(6), + -3 - 6j*sqrt(6), + +2j+sqrt(-8j-225)-2, + -2j+sqrt(+8j-225)-2, + +2j-sqrt(-8j-225)-2, + -2j-sqrt(+8j-225)-2, ])) + xp_assert_close(k_bp, 24.0) + + +class TestLp2bs_zpk: + + def test_basic(self): + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + + z_bs, p_bs, k_bs = lp2bs_zpk(z, p, k, 35, 12) + + xp_assert_close(sort(z_bs), sort([+35j, -35j, + +3j+sqrt(1234)*1j, + -3j+sqrt(1234)*1j, + +3j-sqrt(1234)*1j, + -3j-sqrt(1234)*1j])) + xp_assert_close(sort(p_bs), sort([+3j*sqrt(129) - 8, + -3j*sqrt(129) - 8, + (-6 + 6j) - sqrt(-1225 - 72j), + (-6 - 6j) - sqrt(-1225 + 72j), + (-6 + 6j) + sqrt(-1225 - 72j), + (-6 - 6j) + sqrt(-1225 + 72j), ])) + xp_assert_close(k_bs, 32.0) + + +class TestBilinear_zpk: + + def test_basic(self): + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + + z_d, p_d, k_d = bilinear_zpk(z, p, k, 10) + + xp_assert_close(sort(z_d), sort([(20-2j)/(20+2j), (20+2j)/(20-2j), + -1])) + xp_assert_close(sort(p_d), sort([77/83, + (1j/2 + 39/2) / (41/2 - 1j/2), + (39/2 - 1j/2) / (1j/2 + 41/2), ])) + xp_assert_close(k_d, 9696/69803) + + +class TestPrototypeType: + + def test_output_type(self): + # Prototypes should consistently output arrays, not lists + # https://github.com/scipy/scipy/pull/441 + for func in (buttap, + besselap, + lambda N: cheb1ap(N, 1), + 
lambda N: cheb2ap(N, 20), + lambda N: ellipap(N, 1, 20)): + for N in range(7): + z, p, k = func(N) + assert isinstance(z, np.ndarray) + assert isinstance(p, np.ndarray) + + +def dB(x): + # Return magnitude in decibels, avoiding divide-by-zero warnings + # (and deal with some "not less-ordered" errors when -inf shows up) + return 20 * np.log10(np.maximum(np.abs(x), np.finfo(np.float64).tiny)) + + +class TestButtord: + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'lowpass', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs) + + assert N == 16 + xp_assert_close(Wn, + 2.0002776782743284e-01, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'highpass', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs) + + assert N == 18 + xp_assert_close(Wn, + 2.9996603079132672e-01, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'bandpass', False) + w, h = freqz(b, a) + w /= np.pi + + assert np.all((-rp - 0.1) < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < (-rs + 0.1)) + + assert N == 18 + xp_assert_close(Wn, [1.9998742411409134e-01, 5.0002139595676276e-01], + rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'bandstop', False) + w, h = freqz(b, a) + w /= np.pi + + assert np.all(-rp < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs) + + assert N == 20 + xp_assert_close(Wn, [1.4759432329294042e-01, 5.9997365985276407e-01], + rtol=1e-6) + + def test_analog(self): + wp = 200 + ws = 600 + rp = 3 + rs = 60 + N, Wn = buttord(wp, ws, rp, rs, True) + b, a = butter(N, Wn, 'lowpass', True) + w, h = freqs(b, a) + assert np.all(-rp < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs) + + assert N == 7 + xp_assert_close(Wn, 2.0006785355671877e+02, rtol=1e-15) + + n, Wn = buttord(1, 550/450, 1, 26, analog=True) + assert n == 19 + xp_assert_close(Wn, 1.0361980524629517, rtol=1e-15) + + xp_assert_equal(buttord(1, 1.2, 1, 80, analog=True)[0], 55) + + def test_fs_param(self): + wp = [4410, 11025] + ws = [2205, 13230] + rp = 3 + rs = 80 + fs = 44100 + N, Wn = buttord(wp, ws, rp, rs, False, fs=fs) + b, a = butter(N, Wn, 'bandpass', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 18 + xp_assert_close(Wn, [4409.722701715714, 11025.47178084662], + rtol=1e-15) + + def test_invalid_input(self): + with pytest.raises(ValueError) as exc_info: + buttord([20, 50], [14, 60], 3, 2) + assert "gpass should be smaller than gstop" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + buttord([20, 50], [14, 60], -1, 2) + assert "gpass should be larger than 0.0" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + buttord([20, 50], [14, 60], 1, -2) + assert "gstop should be larger than 0.0" in str(exc_info.value) + + @pytest.mark.thread_unsafe + def test_runtime_warnings(self): + msg = 
"Order is zero.*|divide by zero encountered" + with pytest.warns(RuntimeWarning, match=msg): + buttord(0.0, 1.0, 3, 60) + + def test_ellip_butter(self): + # The purpose of the test is to compare to some known output from past + # scipy versions. The values to compare to are generated with scipy + # 1.9.1 (there is nothing special about this particular version though) + n, wn = buttord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert n == 14 + + def test_fs_validation(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + buttord(wp, ws, rp, rs, False, fs=np.array([10, 20])) + + +class TestCheb1ord: + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'low', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + assert N == 8 + xp_assert_close(Wn, 0.2, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'high', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, 0.3, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'band', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, [0.2, 0.5], rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'stop', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 10 + xp_assert_close(Wn, [0.14758232569947785, 0.6], rtol=1e-5) + + def test_analog(self): + wp = 700 + ws = 100 + rp = 3 + rs = 70 + N, Wn = cheb1ord(wp, ws, rp, rs, True) + b, a = cheby1(N, rp, Wn, 'high', True) + w, h = freqs(b, a) + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 4 + xp_assert_close(Wn, 700.0, rtol=1e-15) + + xp_assert_equal(cheb1ord(1, 1.2, 1, 80, analog=True)[0], 17) + + def test_fs_param(self): + wp = 4800 + ws = 7200 + rp = 3 + rs = 60 + fs = 48000 + N, Wn = cheb1ord(wp, ws, rp, rs, False, fs=fs) + b, a = cheby1(N, rp, Wn, 'low', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + assert N == 8 + xp_assert_close(Wn, 4800.0, rtol=1e-15) + + def test_invalid_input(self): + with pytest.raises(ValueError) as exc_info: + cheb1ord(0.2, 0.3, 3, 2) + assert "gpass should be smaller than gstop" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + cheb1ord(0.2, 0.3, -1, 2) + assert "gpass should be larger than 0.0" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + cheb1ord(0.2, 0.3, 1, -2) + assert "gstop should be larger than 0.0" in str(exc_info.value) + + def test_ellip_cheb1(self): + # The purpose of the test is to compare to some known output from past + # scipy versions. 
The values to compare to are generated with scipy + # 1.9.1 (there is nothing special about this particular version though) + n, wn = cheb1ord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert n == 7 + + n2, w2 = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert not (wn == w2).all() + + def test_fs_validation(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + cheb1ord(wp, ws, rp, rs, False, fs=np.array([10, 20])) + + +class TestCheb2ord: + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'lp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + assert N == 8 + xp_assert_close(Wn, 0.28647639976553163, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'hp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, 0.20697492182903282, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'bp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, [0.14876937565923479, 0.59748447842351482], + rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'bs', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 10 + xp_assert_close(Wn, [0.19926249974781743, 0.50125246585567362], + rtol=1e-6) + + def test_analog(self): + wp = [20, 50] + ws = [10, 60] + rp = 3 + rs = 80 + N, Wn = cheb2ord(wp, ws, rp, rs, True) + b, a = cheby2(N, rs, Wn, 'bp', True) + w, h = freqs(b, a) + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 11 + xp_assert_close(Wn, [1.673740595370124e+01, 5.974641487254268e+01], + rtol=1e-15) + + def test_fs_param(self): + wp = 150 + ws = 100 + rp = 3 + rs = 70 + fs = 1000 + N, Wn = cheb2ord(wp, ws, rp, rs, False, fs=fs) + b, a = cheby2(N, rs, Wn, 'hp', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 9 + xp_assert_close(Wn, 103.4874609145164, rtol=1e-15) + + def test_invalid_input(self): + with pytest.raises(ValueError) as exc_info: + cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 2) + assert "gpass should be smaller than gstop" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + cheb2ord([0.1, 0.6], [0.2, 0.5], -1, 2) + assert "gpass should be larger than 0.0" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + cheb2ord([0.1, 0.6], [0.2, 0.5], 1, -2) + assert "gstop should be larger than 0.0" in str(exc_info.value) + + def test_ellip_cheb2(self): + # The purpose of the test is to compare to some known output from past + # scipy versions. 
The values to compare to are generated with scipy + # 1.9.1 (there is nothing special about this particular version though) + n, wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert n == 7 + + n1, w1 = cheb1ord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert not (wn == w1).all() + + def test_fs_validation(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + cheb2ord(wp, ws, rp, rs, False, fs=np.array([10, 20])) + + +class TestEllipord: + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'lp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + assert N == 5 + xp_assert_close(Wn, 0.2, rtol=1e-15) + + def test_lowpass_1000dB(self): + # failed when ellipkm1 wasn't used in ellipord and ellipap + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 1000 + N, Wn = ellipord(wp, ws, rp, rs, False) + sos = ellip(N, rp, rs, Wn, 'lp', False, output='sos') + w, h = freqz_sos(sos) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[w <= wp])) + assert np.all(dB(h[ws <= w]) < -rs + 0.1) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'hp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[wp <= w])) + assert np.all(dB(h[w <= ws]) < -rs + 0.1) + + assert N == 6 + xp_assert_close(Wn, 0.3, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'bp', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) + + assert N == 6 + xp_assert_close(Wn, [0.2, 0.5], rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'bs', False) + w, h = freqz(b, a) + w /= np.pi + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 7 + xp_assert_close(Wn, [0.14758232794342988, 0.6], rtol=1e-5) + + def test_analog(self): + wp = [1000, 6000] + ws = [2000, 5000] + rp = 3 + rs = 90 + N, Wn = ellipord(wp, ws, rp, rs, True) + b, a = ellip(N, rp, rs, Wn, 'bs', True) + w, h = freqs(b, a) + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 8 + xp_assert_close(Wn, [1666.6666, 6000]) + + assert ellipord(1, 1.2, 1, 80, analog=True)[0] == 9 + + def test_fs_param(self): + wp = [400, 2400] + ws = [800, 2000] + rp = 3 + rs = 90 + fs = 8000 + N, Wn = ellipord(wp, ws, rp, rs, False, fs=fs) + b, a = ellip(N, rp, rs, Wn, 'bs', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) + + assert N == 7 + xp_assert_close(Wn, [590.3293117737195, 2400], rtol=1e-5) + + def test_invalid_input(self): + with pytest.raises(ValueError) as exc_info: + ellipord(0.2, 0.5, 3, 2) + assert "gpass should be smaller than gstop" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + ellipord(0.2, 0.5, -1, 2) + assert "gpass 
should be larger than 0.0" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + ellipord(0.2, 0.5, 1, -2) + assert "gstop should be larger than 0.0" in str(exc_info.value) + + def test_ellip_butter(self): + # The purpose of the test is to compare to some known output from past + # scipy versions. The values to compare to are generated with scipy + # 1.9.1 (there is nothing special about this particular version though) + n, wn = ellipord([0.1, 0.6], [0.2, 0.5], 3, 60) + assert n == 5 + + def test_fs_validation(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + + with pytest.raises(ValueError, match="Sampling.*single scalar"): + ellipord(wp, ws, rp, rs, False, fs=np.array([10, 20])) + + +class TestBessel: + + def test_degenerate(self): + for norm in ('delay', 'phase', 'mag'): + # 0-order filter is just a passthrough + b, a = bessel(0, 1, analog=True, norm=norm) + xp_assert_equal(b, np.asarray([1.0])) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = bessel(1, 1, analog=True, norm=norm) + xp_assert_close(b, np.asarray([1.0]), rtol=1e-15) + xp_assert_close(a, np.asarray([1.0, 1]), rtol=1e-15) + + z, p, k = bessel(1, 0.3, analog=True, output='zpk', norm=norm) + xp_assert_equal(z, np.asarray([])) + xp_assert_close(p, np.asarray([-0.3+0j]), rtol=1e-14) + xp_assert_close(k, 0.3, rtol=1e-14) + + def test_high_order(self): + # high even order, 'phase' + z, p, k = bessel(24, 100, analog=True, output='zpk') + z2 = [] + p2 = [ + -9.055312334014323e+01 + 4.844005815403969e+00j, + -8.983105162681878e+01 + 1.454056170018573e+01j, + -8.837357994162065e+01 + 2.426335240122282e+01j, + -8.615278316179575e+01 + 3.403202098404543e+01j, + -8.312326467067703e+01 + 4.386985940217900e+01j, + -7.921695461084202e+01 + 5.380628489700191e+01j, + -7.433392285433246e+01 + 6.388084216250878e+01j, + -6.832565803501586e+01 + 7.415032695116071e+01j, + -6.096221567378025e+01 + 8.470292433074425e+01j, + -5.185914574820616e+01 + 9.569048385258847e+01j, + -4.027853855197555e+01 + 1.074195196518679e+02j, + -2.433481337524861e+01 + 1.207298683731973e+02j, + ] + k2 = 9.999999999999989e+47 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + xp_assert_close(k, k2, rtol=1e-14) + + # high odd order, 'phase' + z, p, k = bessel(23, 1000, analog=True, output='zpk') + z2 = [] + p2 = [ + -2.497697202208956e+02 + 1.202813187870698e+03j, + -4.126986617510172e+02 + 1.065328794475509e+03j, + -5.304922463809596e+02 + 9.439760364018479e+02j, + -9.027564978975828e+02 + 1.010534334242318e+02j, + -8.909283244406079e+02 + 2.023024699647598e+02j, + -8.709469394347836e+02 + 3.039581994804637e+02j, + -8.423805948131370e+02 + 4.062657947488952e+02j, + -8.045561642249877e+02 + 5.095305912401127e+02j, + -7.564660146766259e+02 + 6.141594859516342e+02j, + -6.965966033906477e+02 + 7.207341374730186e+02j, + -6.225903228776276e+02 + 8.301558302815096e+02j, + -9.066732476324988e+02] + k2 = 9.999999999999983e+68 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + xp_assert_close(k, k2, rtol=1e-14) + + # high even order, 'delay' (Orchard 1965 "The Roots of the + # Maximally Flat-Delay Polynomials" Table 1) + z, p, k = bessel(31, 1, analog=True, output='zpk', norm='delay') + p2 = [-20.876706, + -20.826543 + 1.735732j, + -20.675502 + 3.473320j, + -20.421895 + 5.214702j, + -20.062802 + 6.961982j, + -19.593895 + 8.717546j, + -19.009148 + 10.484195j, + -18.300400 + 
12.265351j, + -17.456663 + 14.065350j, + -16.463032 + 15.889910j, + -15.298849 + 17.746914j, + -13.934466 + 19.647827j, + -12.324914 + 21.610519j, + -10.395893 + 23.665701j, + - 8.005600 + 25.875019j, + - 4.792045 + 28.406037j, + ] + xp_assert_close(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + + # high odd order, 'delay' + z, p, k = bessel(30, 1, analog=True, output='zpk', norm='delay') + p2 = [-20.201029 + 0.867750j, + -20.097257 + 2.604235j, + -19.888485 + 4.343721j, + -19.572188 + 6.088363j, + -19.144380 + 7.840570j, + -18.599342 + 9.603147j, + -17.929195 + 11.379494j, + -17.123228 + 13.173901j, + -16.166808 + 14.992008j, + -15.039580 + 16.841580j, + -13.712245 + 18.733902j, + -12.140295 + 20.686563j, + -10.250119 + 22.729808j, + - 7.901170 + 24.924391j, + - 4.734679 + 27.435615j, + ] + xp_assert_close(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + + def test_refs(self): + # Compare to http://www.crbond.com/papers/bsf2.pdf + # "Delay Normalized Bessel Polynomial Coefficients" + bond_b = np.asarray([10395.0]) + bond_a = np.asarray([1.0, 21, 210, 1260, 4725, 10395, 10395]) + b, a = bessel(6, 1, norm='delay', analog=True) + xp_assert_close(b, bond_b) + xp_assert_close(a, bond_a) + + # "Delay Normalized Bessel Pole Locations" + bond_poles = { + 1: [-1.0000000000], + 2: [-1.5000000000 + 0.8660254038j], + 3: [-1.8389073227 + 1.7543809598j, -2.3221853546], + 4: [-2.1037893972 + 2.6574180419j, -2.8962106028 + 0.8672341289j], + 5: [-2.3246743032 + 3.5710229203j, -3.3519563992 + 1.7426614162j, + -3.6467385953], + 6: [-2.5159322478 + 4.4926729537j, -3.7357083563 + 2.6262723114j, + -4.2483593959 + 0.8675096732j], + 7: [-2.6856768789 + 5.4206941307j, -4.0701391636 + 3.5171740477j, + -4.7582905282 + 1.7392860611j, -4.9717868585], + 8: [-2.8389839489 + 6.3539112986j, -4.3682892172 + 4.4144425005j, + -5.2048407906 + 2.6161751526j, -5.5878860433 + 0.8676144454j], + 9: [-2.9792607982 + 7.2914636883j, -4.6384398872 + 5.3172716754j, + -5.6044218195 + 3.4981569179j, -6.1293679043 + 1.7378483835j, + -6.2970191817], + 10: [-3.1089162336 + 8.2326994591j, -4.8862195669 + 6.2249854825j, + -5.9675283286 + 4.3849471889j, -6.6152909655 + 2.6115679208j, + -6.9220449054 + 0.8676651955j] + } + + for N in range(1, 11): + p1 = np.sort(bond_poles[N]) + p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'delay')[1]))) + assert_array_almost_equal(p1, p2, decimal=10) + + # "Frequency Normalized Bessel Pole Locations" + bond_poles = { + 1: [-1.0000000000], + 2: [-1.1016013306 + 0.6360098248j], + 3: [-1.0474091610 + 0.9992644363j, -1.3226757999], + 4: [-0.9952087644 + 1.2571057395j, -1.3700678306 + 0.4102497175j], + 5: [-0.9576765486 + 1.4711243207j, -1.3808773259 + 0.7179095876j, + -1.5023162714], + 6: [-0.9306565229 + 1.6618632689j, -1.3818580976 + 0.9714718907j, + -1.5714904036 + 0.3208963742j], + 7: [-0.9098677806 + 1.8364513530j, -1.3789032168 + 1.1915667778j, + -1.6120387662 + 0.5892445069j, -1.6843681793], + 8: [-0.8928697188 + 1.9983258436j, -1.3738412176 + 1.3883565759j, + -1.6369394181 + 0.8227956251j, -1.7574084004 + 0.2728675751j], + 9: [-0.8783992762 + 2.1498005243j, -1.3675883098 + 1.5677337122j, + -1.6523964846 + 1.0313895670j, -1.8071705350 + 0.5123837306j, + -1.8566005012], + 10: [-0.8657569017 + 2.2926048310j, -1.3606922784 + 1.7335057427j, + -1.6618102414 + 1.2211002186j, -1.8421962445 + 0.7272575978j, + -1.9276196914 + 0.2416234710j] + } + + for N in range(1, 11): + p1 = np.sort(bond_poles[N]) + p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 
'mag')[1]))) + assert_array_almost_equal(p1, p2, decimal=10) + + # Compare to https://www.ranecommercial.com/legacy/note147.html + # "Table 1 - Bessel Crossovers of Second, Third, and Fourth-Order" + a = np.asarray([1, 1, 1/3]) + b2, a2 = bessel(2, 1, norm='delay', analog=True) + xp_assert_close(a[::-1], a2/b2) + + a = np.asarray([1, 1, 2/5, 1/15]) + b2, a2 = bessel(3, 1, norm='delay', analog=True) + xp_assert_close(a[::-1], a2/b2) + + a = np.asarray([1, 1, 9/21, 2/21, 1/105]) + b2, a2 = bessel(4, 1, norm='delay', analog=True) + xp_assert_close(a[::-1], a2/b2) + + a = np.asarray([1, np.sqrt(3), 1]) + b2, a2 = bessel(2, 1, norm='phase', analog=True) + xp_assert_close(a[::-1], a2/b2) + + # TODO: Why so inaccurate? Is reference flawed? + a = np.asarray([1, 2.481, 2.463, 1.018]) + b2, a2 = bessel(3, 1, norm='phase', analog=True) + assert_array_almost_equal(a[::-1], a2/b2, decimal=1) + + # TODO: Why so inaccurate? Is reference flawed? + a = np.asarray([1, 3.240, 4.5, 3.240, 1.050]) + b2, a2 = bessel(4, 1, norm='phase', analog=True) + assert_array_almost_equal(a[::-1], a2/b2, decimal=1) + + # Table of -3 dB factors: + N, scale = 2, np.asarray([1.272, 1.272], dtype=np.complex128) + scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] + assert_array_almost_equal(scale2, scale, decimal=3) + + # TODO: Why so inaccurate? Is reference flawed? + N, scale = 3, np.asarray([1.413, 1.413, 1.413], dtype=np.complex128) + scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] + assert_array_almost_equal(scale2, scale, decimal=2) + + # TODO: Why so inaccurate? Is reference flawed? + N, scale = 4, np.asarray([1.533]*4, dtype=np.complex128) + scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] + assert_array_almost_equal(scale, scale2, decimal=1) + + def test_hardcoded(self): + # Compare to values from original hardcoded implementation + originals = { + 0: [], + 1: [-1], + 2: [-.8660254037844386467637229 + .4999999999999999999999996j], + 3: [-.9416000265332067855971980, + -.7456403858480766441810907 + .7113666249728352680992154j], + 4: [-.6572111716718829545787788 + .8301614350048733772399715j, + -.9047587967882449459642624 + .2709187330038746636700926j], + 5: [-.9264420773877602247196260, + -.8515536193688395541722677 + .4427174639443327209850002j, + -.5905759446119191779319432 + .9072067564574549539291747j], + 6: [-.9093906830472271808050953 + .1856964396793046769246397j, + -.7996541858328288520243325 + .5621717346937317988594118j, + -.5385526816693109683073792 + .9616876881954277199245657j], + 7: [-.9194871556490290014311619, + -.8800029341523374639772340 + .3216652762307739398381830j, + -.7527355434093214462291616 + .6504696305522550699212995j, + -.4966917256672316755024763 + 1.002508508454420401230220j], + 8: [-.9096831546652910216327629 + .1412437976671422927888150j, + -.8473250802359334320103023 + .4259017538272934994996429j, + -.7111381808485399250796172 + .7186517314108401705762571j, + -.4621740412532122027072175 + 1.034388681126901058116589j], + 9: [-.9154957797499037686769223, + -.8911217017079759323183848 + .2526580934582164192308115j, + -.8148021112269012975514135 + .5085815689631499483745341j, + -.6743622686854761980403401 + .7730546212691183706919682j, + -.4331415561553618854685942 + 1.060073670135929666774323j], + 10: [-.9091347320900502436826431 + .1139583137335511169927714j, + -.8688459641284764527921864 + .3430008233766309973110589j, + -.7837694413101441082655890 + .5759147538499947070009852j, + -.6417513866988316136190854 + .8175836167191017226233947j, + -.4083220732868861566219785 
+ 1.081274842819124562037210j], + 11: [-.9129067244518981934637318, + -.8963656705721166099815744 + .2080480375071031919692341j, + -.8453044014712962954184557 + .4178696917801248292797448j, + -.7546938934722303128102142 + .6319150050721846494520941j, + -.6126871554915194054182909 + .8547813893314764631518509j, + -.3868149510055090879155425 + 1.099117466763120928733632j], + 12: [-.9084478234140682638817772 + 95506365213450398415258360e-27j, + -.8802534342016826507901575 + .2871779503524226723615457j, + -.8217296939939077285792834 + .4810212115100676440620548j, + -.7276681615395159454547013 + .6792961178764694160048987j, + -.5866369321861477207528215 + .8863772751320727026622149j, + -.3679640085526312839425808 + 1.114373575641546257595657j], + 13: [-.9110914665984182781070663, + -.8991314665475196220910718 + .1768342956161043620980863j, + -.8625094198260548711573628 + .3547413731172988997754038j, + -.7987460692470972510394686 + .5350752120696801938272504j, + -.7026234675721275653944062 + .7199611890171304131266374j, + -.5631559842430199266325818 + .9135900338325109684927731j, + -.3512792323389821669401925 + 1.127591548317705678613239j], + 14: [-.9077932138396487614720659 + 82196399419401501888968130e-27j, + -.8869506674916445312089167 + .2470079178765333183201435j, + -.8441199160909851197897667 + .4131653825102692595237260j, + -.7766591387063623897344648 + .5819170677377608590492434j, + -.6794256425119233117869491 + .7552857305042033418417492j, + -.5418766775112297376541293 + .9373043683516919569183099j, + -.3363868224902037330610040 + 1.139172297839859991370924j], + 15: [-.9097482363849064167228581, + -.9006981694176978324932918 + .1537681197278439351298882j, + -.8731264620834984978337843 + .3082352470564267657715883j, + -.8256631452587146506294553 + .4642348752734325631275134j, + -.7556027168970728127850416 + .6229396358758267198938604j, + -.6579196593110998676999362 + .7862895503722515897065645j, + -.5224954069658330616875186 + .9581787261092526478889345j, + -.3229963059766444287113517 + 1.149416154583629539665297j], + 16: [-.9072099595087001356491337 + 72142113041117326028823950e-27j, + -.8911723070323647674780132 + .2167089659900576449410059j, + -.8584264231521330481755780 + .3621697271802065647661080j, + -.8074790293236003885306146 + .5092933751171800179676218j, + -.7356166304713115980927279 + .6591950877860393745845254j, + -.6379502514039066715773828 + .8137453537108761895522580j, + -.5047606444424766743309967 + .9767137477799090692947061j, + -.3108782755645387813283867 + 1.158552841199330479412225j], + 17: [-.9087141161336397432860029, + -.9016273850787285964692844 + .1360267995173024591237303j, + -.8801100704438627158492165 + .2725347156478803885651973j, + -.8433414495836129204455491 + .4100759282910021624185986j, + -.7897644147799708220288138 + .5493724405281088674296232j, + -.7166893842372349049842743 + .6914936286393609433305754j, + -.6193710717342144521602448 + .8382497252826992979368621j, + -.4884629337672704194973683 + .9932971956316781632345466j, + -.2998489459990082015466971 + 1.166761272925668786676672j], + 18: [-.9067004324162775554189031 + 64279241063930693839360680e-27j, + -.8939764278132455733032155 + .1930374640894758606940586j, + -.8681095503628830078317207 + .3224204925163257604931634j, + -.8281885016242836608829018 + .4529385697815916950149364j, + -.7726285030739558780127746 + .5852778162086640620016316j, + -.6987821445005273020051878 + .7204696509726630531663123j, + -.6020482668090644386627299 + .8602708961893664447167418j, + -.4734268069916151511140032 + 
1.008234300314801077034158j, + -.2897592029880489845789953 + 1.174183010600059128532230j], + 19: [-.9078934217899404528985092, + -.9021937639390660668922536 + .1219568381872026517578164j, + -.8849290585034385274001112 + .2442590757549818229026280j, + -.8555768765618421591093993 + .3672925896399872304734923j, + -.8131725551578197705476160 + .4915365035562459055630005j, + -.7561260971541629355231897 + .6176483917970178919174173j, + -.6818424412912442033411634 + .7466272357947761283262338j, + -.5858613321217832644813602 + .8801817131014566284786759j, + -.4595043449730988600785456 + 1.021768776912671221830298j, + -.2804866851439370027628724 + 1.180931628453291873626003j], + 20: [-.9062570115576771146523497 + 57961780277849516990208850e-27j, + -.8959150941925768608568248 + .1740317175918705058595844j, + -.8749560316673332850673214 + .2905559296567908031706902j, + -.8427907479956670633544106 + .4078917326291934082132821j, + -.7984251191290606875799876 + .5264942388817132427317659j, + -.7402780309646768991232610 + .6469975237605228320268752j, + -.6658120544829934193890626 + .7703721701100763015154510j, + -.5707026806915714094398061 + .8982829066468255593407161j, + -.4465700698205149555701841 + 1.034097702560842962315411j, + -.2719299580251652601727704 + 1.187099379810885886139638j], + 21: [-.9072262653142957028884077, + -.9025428073192696303995083 + .1105252572789856480992275j, + -.8883808106664449854431605 + .2213069215084350419975358j, + -.8643915813643204553970169 + .3326258512522187083009453j, + -.8299435470674444100273463 + .4448177739407956609694059j, + -.7840287980408341576100581 + .5583186348022854707564856j, + -.7250839687106612822281339 + .6737426063024382240549898j, + -.6506315378609463397807996 + .7920349342629491368548074j, + -.5564766488918562465935297 + .9148198405846724121600860j, + -.4345168906815271799687308 + 1.045382255856986531461592j, + -.2640041595834031147954813 + 1.192762031948052470183960j], + 22: [-.9058702269930872551848625 + 52774908289999045189007100e-27j, + -.8972983138153530955952835 + .1584351912289865608659759j, + -.8799661455640176154025352 + .2644363039201535049656450j, + -.8534754036851687233084587 + .3710389319482319823405321j, + -.8171682088462720394344996 + .4785619492202780899653575j, + -.7700332930556816872932937 + .5874255426351153211965601j, + -.7105305456418785989070935 + .6982266265924524000098548j, + -.6362427683267827226840153 + .8118875040246347267248508j, + -.5430983056306302779658129 + .9299947824439872998916657j, + -.4232528745642628461715044 + 1.055755605227545931204656j, + -.2566376987939318038016012 + 1.197982433555213008346532j], + 23: [-.9066732476324988168207439, + -.9027564979912504609412993 + .1010534335314045013252480j, + -.8909283242471251458653994 + .2023024699381223418195228j, + -.8709469395587416239596874 + .3039581993950041588888925j, + -.8423805948021127057054288 + .4062657948237602726779246j, + -.8045561642053176205623187 + .5095305912227258268309528j, + -.7564660146829880581478138 + .6141594859476032127216463j, + -.6965966033912705387505040 + .7207341374753046970247055j, + -.6225903228771341778273152 + .8301558302812980678845563j, + -.5304922463810191698502226 + .9439760364018300083750242j, + -.4126986617510148836149955 + 1.065328794475513585531053j, + -.2497697202208956030229911 + 1.202813187870697831365338j], + 24: [-.9055312363372773709269407 + 48440066540478700874836350e-27j, + -.8983105104397872954053307 + .1454056133873610120105857j, + -.8837358034555706623131950 + .2426335234401383076544239j, + -.8615278304016353651120610 
+ .3403202112618624773397257j, + -.8312326466813240652679563 + .4386985933597305434577492j, + -.7921695462343492518845446 + .5380628490968016700338001j, + -.7433392285088529449175873 + .6388084216222567930378296j, + -.6832565803536521302816011 + .7415032695091650806797753j, + -.6096221567378335562589532 + .8470292433077202380020454j, + -.5185914574820317343536707 + .9569048385259054576937721j, + -.4027853855197518014786978 + 1.074195196518674765143729j, + -.2433481337524869675825448 + 1.207298683731972524975429j], + 25: [-.9062073871811708652496104, + -.9028833390228020537142561 + 93077131185102967450643820e-27j, + -.8928551459883548836774529 + .1863068969804300712287138j, + -.8759497989677857803656239 + .2798521321771408719327250j, + -.8518616886554019782346493 + .3738977875907595009446142j, + -.8201226043936880253962552 + .4686668574656966589020580j, + -.7800496278186497225905443 + .5644441210349710332887354j, + -.7306549271849967721596735 + .6616149647357748681460822j, + -.6704827128029559528610523 + .7607348858167839877987008j, + -.5972898661335557242320528 + .8626676330388028512598538j, + -.5073362861078468845461362 + .9689006305344868494672405j, + -.3934529878191079606023847 + 1.082433927173831581956863j, + -.2373280669322028974199184 + 1.211476658382565356579418j], + } + for N in originals: + p1 = sorted(np.union1d(originals[N], + np.conj(originals[N])), key=np.imag) + p2 = sorted(besselap(N)[1], key=np.imag) + xp_assert_close(p1, + p2, rtol=1e-14, check_dtype=False) + + def test_norm_phase(self): + # Test some orders and frequencies and see that they have the right + # phase at w0 + for N in (1, 2, 3, 4, 5, 51, 72): + for w0 in (1, 100): + b, a = bessel(N, w0, analog=True, norm='phase') + w = np.linspace(0, w0, 100) + w, h = freqs(b, a, w) + phase = np.unwrap(np.angle(h)) + xp_assert_close(phase[[0, -1]], (0, -N*pi/4), rtol=1e-1) + + def test_norm_mag(self): + # Test some orders and frequencies and see that they have the right + # mag at w0 + for N in (1, 2, 3, 4, 5, 51, 72): + for w0 in (1, 100): + b, a = bessel(N, w0, analog=True, norm='mag') + w = (0, w0) + w, h = freqs(b, a, w) + mag = abs(h) + xp_assert_close(mag, (1, 1/np.sqrt(2))) + + def test_norm_delay(self): + # Test some orders and frequencies and see that they have the right + # delay at DC + for N in (1, 2, 3, 4, 5, 51, 72): + for w0 in (1, 100): + b, a = bessel(N, w0, analog=True, norm='delay') + w = np.linspace(0, 10*w0, 1000) + w, h = freqs(b, a, w) + delay = -np.diff(np.unwrap(np.angle(h)))/np.diff(w) + xp_assert_close(delay[0], 1/w0, rtol=1e-4) + + def test_norm_factor(self): + mpmath_values = { + 1: 1.0, 2: 1.361654128716130520, 3: 1.755672368681210649, + 4: 2.113917674904215843, 5: 2.427410702152628137, + 6: 2.703395061202921876, 7: 2.951722147038722771, + 8: 3.179617237510651330, 9: 3.391693138911660101, + 10: 3.590980594569163482, 11: 3.779607416439620092, + 12: 3.959150821144285315, 13: 4.130825499383535980, + 14: 4.295593409533637564, 15: 4.454233021624377494, + 16: 4.607385465472647917, 17: 4.755586548961147727, + 18: 4.899289677284488007, 19: 5.038882681488207605, + 20: 5.174700441742707423, 21: 5.307034531360917274, + 22: 5.436140703250035999, 23: 5.562244783787878196, + 24: 5.685547371295963521, 25: 5.806227623775418541, + 50: 8.268963160013226298, 51: 8.352374541546012058, + } + for N in mpmath_values: + z, p, k = besselap(N, 'delay') + xp_assert_close(mpmath_values[N], _norm_factor(p, k), rtol=1e-13) + + def test_bessel_poly(self): + xp_assert_equal(_bessel_poly(5), [945, 945, 420, 105, 15, 1]) + 
xp_assert_equal(_bessel_poly(4, True), [1, 10, 45, 105, 105]) + + def test_bessel_zeros(self): + xp_assert_equal(_bessel_zeros(0), []) + + def test_invalid(self): + assert_raises(ValueError, besselap, 5, 'nonsense') + assert_raises(ValueError, besselap, -5) + assert_raises(ValueError, besselap, 3.2) + assert_raises(ValueError, _bessel_poly, -3) + assert_raises(ValueError, _bessel_poly, 3.3) + + @pytest.mark.fail_slow(10) + def test_fs_param(self): + for norm in ('phase', 'mag', 'delay'): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = bessel(N, fc, btype, norm=norm, fs=fs) + ba2 = bessel(N, fc/(fs/2), btype, norm=norm) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = bessel(N, fc, btype, norm=norm, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = bessel(N, fcnorm, btype, norm=norm) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + + +class TestButter: + + def test_degenerate(self): + # 0-order filter is just a passthrough + b, a = butter(0, 1, analog=True) + xp_assert_equal(b, np.asarray([1.0])) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = butter(1, 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = butter(1, 0.3, output='zpk') + xp_assert_equal(z, np.asarray([-1.0])) + xp_assert_close(p, [3.249196962329063e-01 + 0j], rtol=1e-14) + xp_assert_close(k, 3.375401518835469e-01, rtol=1e-14) + + def test_basic(self): + # analog s-plane + for N in range(25): + wn = 0.01 + z, p, k = butter(N, wn, 'low', analog=True, output='zpk') + assert_array_almost_equal([], z) + assert len(p) == N + # All poles should be at distance wn from origin + assert_array_almost_equal(abs(p), np.asarray(wn)) + assert all(np.real(p) <= 0) # No poles in right half of S-plane + assert_array_almost_equal(wn**N, k) + + # digital z-plane + for N in range(25): + wn = 0.01 + z, p, k = butter(N, wn, 'high', analog=False, output='zpk') + xp_assert_equal(np.ones(N), z) # All zeros exactly at DC + assert all(np.abs(p) <= 1) # No poles outside unit circle + + b1, a1 = butter(2, 1, analog=True) + assert_array_almost_equal(b1, [1]) + assert_array_almost_equal(a1, [1, np.sqrt(2), 1]) + + b2, a2 = butter(5, 1, analog=True) + assert_array_almost_equal(b2, [1]) + assert_array_almost_equal(a2, [1, 3.2361, 5.2361, + 5.2361, 3.2361, 1], decimal=4) + + b3, a3 = butter(10, 1, analog=True) + assert_array_almost_equal(b3, [1]) + assert_array_almost_equal(a3, [1, 6.3925, 20.4317, 42.8021, 64.8824, + 74.2334, 64.8824, 42.8021, 20.4317, + 6.3925, 1], decimal=4) + + b2, a2 = butter(19, 1.0441379169150726, analog=True) + assert_array_almost_equal(b2, [2.2720], decimal=4) + assert_array_almost_equal(a2, 1.0e+004 * np.array([ + 0.0001, 0.0013, 0.0080, 0.0335, 0.1045, 0.2570, + 0.5164, 0.8669, 1.2338, 1.5010, 1.5672, 1.4044, + 1.0759, 0.6986, 0.3791, 0.1681, 0.0588, 0.0153, + 0.0026, 0.0002]), decimal=0) + + b, a = butter(5, 0.4) + assert_array_almost_equal(b, [0.0219, 0.1097, 0.2194, + 0.2194, 0.1097, 0.0219], decimal=4) + assert_array_almost_equal(a, [1.0000, -0.9853, 0.9738, + -0.3864, 0.1112, -0.0113], decimal=4) + + def test_highpass(self): + # highpass, high even order + z, p, k = butter(28, 0.43, 'high', output='zpk') + z2 = np.ones(28) + p2 = [ + 2.068257195514592e-01 + 
9.238294351481734e-01j, + 2.068257195514592e-01 - 9.238294351481734e-01j, + 1.874933103892023e-01 + 8.269455076775277e-01j, + 1.874933103892023e-01 - 8.269455076775277e-01j, + 1.717435567330153e-01 + 7.383078571194629e-01j, + 1.717435567330153e-01 - 7.383078571194629e-01j, + 1.588266870755982e-01 + 6.564623730651094e-01j, + 1.588266870755982e-01 - 6.564623730651094e-01j, + 1.481881532502603e-01 + 5.802343458081779e-01j, + 1.481881532502603e-01 - 5.802343458081779e-01j, + 1.394122576319697e-01 + 5.086609000582009e-01j, + 1.394122576319697e-01 - 5.086609000582009e-01j, + 1.321840881809715e-01 + 4.409411734716436e-01j, + 1.321840881809715e-01 - 4.409411734716436e-01j, + 1.262633413354405e-01 + 3.763990035551881e-01j, + 1.262633413354405e-01 - 3.763990035551881e-01j, + 1.214660449478046e-01 + 3.144545234797277e-01j, + 1.214660449478046e-01 - 3.144545234797277e-01j, + 1.104868766650320e-01 + 2.771505404367791e-02j, + 1.104868766650320e-01 - 2.771505404367791e-02j, + 1.111768629525075e-01 + 8.331369153155753e-02j, + 1.111768629525075e-01 - 8.331369153155753e-02j, + 1.125740630842972e-01 + 1.394219509611784e-01j, + 1.125740630842972e-01 - 1.394219509611784e-01j, + 1.147138487992747e-01 + 1.963932363793666e-01j, + 1.147138487992747e-01 - 1.963932363793666e-01j, + 1.176516491045901e-01 + 2.546021573417188e-01j, + 1.176516491045901e-01 - 2.546021573417188e-01j, + ] + k2 = 1.446671081817286e-06 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-7) + xp_assert_close(k, k2, rtol=1e-10) + + # highpass, high odd order + z, p, k = butter(27, 0.56, 'high', output='zpk') + z2 = np.ones(27) + p2 = [ + -1.772572785680147e-01 + 9.276431102995948e-01j, + -1.772572785680147e-01 - 9.276431102995948e-01j, + -1.600766565322114e-01 + 8.264026279893268e-01j, + -1.600766565322114e-01 - 8.264026279893268e-01j, + -1.461948419016121e-01 + 7.341841939120078e-01j, + -1.461948419016121e-01 - 7.341841939120078e-01j, + -1.348975284762046e-01 + 6.493235066053785e-01j, + -1.348975284762046e-01 - 6.493235066053785e-01j, + -1.256628210712206e-01 + 5.704921366889227e-01j, + -1.256628210712206e-01 - 5.704921366889227e-01j, + -1.181038235962314e-01 + 4.966120551231630e-01j, + -1.181038235962314e-01 - 4.966120551231630e-01j, + -1.119304913239356e-01 + 4.267938916403775e-01j, + -1.119304913239356e-01 - 4.267938916403775e-01j, + -1.069237739782691e-01 + 3.602914879527338e-01j, + -1.069237739782691e-01 - 3.602914879527338e-01j, + -1.029178030691416e-01 + 2.964677964142126e-01j, + -1.029178030691416e-01 - 2.964677964142126e-01j, + -9.978747500816100e-02 + 2.347687643085738e-01j, + -9.978747500816100e-02 - 2.347687643085738e-01j, + -9.743974496324025e-02 + 1.747028739092479e-01j, + -9.743974496324025e-02 - 1.747028739092479e-01j, + -9.580754551625957e-02 + 1.158246860771989e-01j, + -9.580754551625957e-02 - 1.158246860771989e-01j, + -9.484562207782568e-02 + 5.772118357151691e-02j, + -9.484562207782568e-02 - 5.772118357151691e-02j, + -9.452783117928215e-02 + ] + k2 = 9.585686688851069e-09 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-8) + xp_assert_close(k, k2) + + def test_bandpass(self): + z, p, k = butter(8, [0.25, 0.33], 'band', output='zpk') + z2 = [1, 1, 1, 1, 1, 1, 1, 1, + -1, -1, -1, -1, -1, -1, -1, -1] + p2 = [ + 4.979909925436156e-01 + 8.367609424799387e-01j, + 4.979909925436156e-01 - 8.367609424799387e-01j, + 4.913338722555539e-01 + 7.866774509868817e-01j, + 4.913338722555539e-01 - 7.866774509868817e-01j, + 
5.035229361778706e-01 + 7.401147376726750e-01j, + 5.035229361778706e-01 - 7.401147376726750e-01j, + 5.307617160406101e-01 + 7.029184459442954e-01j, + 5.307617160406101e-01 - 7.029184459442954e-01j, + 5.680556159453138e-01 + 6.788228792952775e-01j, + 5.680556159453138e-01 - 6.788228792952775e-01j, + 6.100962560818854e-01 + 6.693849403338664e-01j, + 6.100962560818854e-01 - 6.693849403338664e-01j, + 6.904694312740631e-01 + 6.930501690145245e-01j, + 6.904694312740631e-01 - 6.930501690145245e-01j, + 6.521767004237027e-01 + 6.744414640183752e-01j, + 6.521767004237027e-01 - 6.744414640183752e-01j, + ] + k2 = 3.398854055800844e-08 + xp_assert_equal(z, z2, check_dtype=False) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-13) + + # bandpass analog + z, p, k = butter(4, [90.5, 110.5], 'bp', analog=True, output='zpk') + z2 = np.zeros(4, dtype=z.dtype) + p2 = [ + -4.179137760733086e+00 + 1.095935899082837e+02j, + -4.179137760733086e+00 - 1.095935899082837e+02j, + -9.593598668443835e+00 + 1.034745398029734e+02j, + -9.593598668443835e+00 - 1.034745398029734e+02j, + -8.883991981781929e+00 + 9.582087115567160e+01j, + -8.883991981781929e+00 - 9.582087115567160e+01j, + -3.474530886568715e+00 + 9.111599925805801e+01j, + -3.474530886568715e+00 - 9.111599925805801e+01j, + ] + k2 = 1.600000000000001e+05 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag)) + xp_assert_close(k, k2, rtol=1e-15) + + def test_bandstop(self): + z, p, k = butter(7, [0.45, 0.56], 'stop', output='zpk') + z2 = [-1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j] + p2 = [-1.766850742887729e-01 + 9.466951258673900e-01j, + -1.766850742887729e-01 - 9.466951258673900e-01j, + 1.467897662432886e-01 + 9.515917126462422e-01j, + 1.467897662432886e-01 - 9.515917126462422e-01j, + -1.370083529426906e-01 + 8.880376681273993e-01j, + -1.370083529426906e-01 - 8.880376681273993e-01j, + 1.086774544701390e-01 + 8.915240810704319e-01j, + 1.086774544701390e-01 - 8.915240810704319e-01j, + -7.982704457700891e-02 + 8.506056315273435e-01j, + -7.982704457700891e-02 - 8.506056315273435e-01j, + 5.238812787110331e-02 + 8.524011102699969e-01j, + 5.238812787110331e-02 - 8.524011102699969e-01j, + -1.357545000491310e-02 + 8.382287744986582e-01j, + -1.357545000491310e-02 - 8.382287744986582e-01j] + k2 = 4.577122512960063e-01 + xp_assert_close(sorted(z, key=np.imag), + sorted(z2, key=np.imag)) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag)) + xp_assert_close(k, k2, rtol=1e-14) + + def test_ba_output(self): + b, a = butter(4, [100, 300], 'bandpass', analog=True) + b2 = [1.6e+09, 0, 0, 0, 0] + a2 = [1.000000000000000e+00, 5.226251859505511e+02, + 2.565685424949238e+05, 6.794127417357160e+07, + 1.519411254969542e+10, 2.038238225207147e+12, + 2.309116882454312e+14, 1.411088002066486e+16, + 
8.099999999999991e+17] + xp_assert_close(b, b2, rtol=1e-14) + xp_assert_close(a, a2, rtol=1e-14) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = butter(N, fc, btype, fs=fs) + ba2 = butter(N, fc/(fs/2), btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = butter(N, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = butter(N, fcnorm, btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + + +class TestCheby1: + + def test_degenerate(self): + # 0-order filter is just a passthrough + # Even-order filters have DC gain of -rp dB + b, a = cheby1(0, 10*np.log10(2), 1, analog=True) + assert_array_almost_equal(b, [1/np.sqrt(2)]) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = cheby1(1, 10*np.log10(2), 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = cheby1(1, 0.1, 0.3, output='zpk') + xp_assert_equal(z, np.asarray([-1.0])) + xp_assert_close(p, [-5.390126972799615e-01 + 0j], rtol=1e-14) + xp_assert_close(k, 7.695063486399808e-01, rtol=1e-14) + + def test_basic(self): + for N in range(25): + wn = 0.01 + z, p, k = cheby1(N, 1, wn, 'low', analog=True, output='zpk') + assert_array_almost_equal([], z) + assert len(p) == N + assert all(np.real(p) <= 0) # No poles in right half of S-plane + + for N in range(25): + wn = 0.01 + z, p, k = cheby1(N, 1, wn, 'high', analog=False, output='zpk') + xp_assert_equal(np.ones(N), z) # All zeros exactly at DC + assert all(np.abs(p) <= 1) # No poles outside unit circle + + # Same test as TestNormalize + b, a = cheby1(8, 0.5, 0.048) + assert_array_almost_equal(b, [ + 2.150733144728282e-11, 1.720586515782626e-10, + 6.022052805239190e-10, 1.204410561047838e-09, + 1.505513201309798e-09, 1.204410561047838e-09, + 6.022052805239190e-10, 1.720586515782626e-10, + 2.150733144728282e-11], decimal=14) + assert_array_almost_equal(a, [ + 1.000000000000000e+00, -7.782402035027959e+00, + 2.654354569747454e+01, -5.182182531666387e+01, + 6.334127355102684e+01, -4.963358186631157e+01, + 2.434862182949389e+01, -6.836925348604676e+00, + 8.412934944449140e-01], decimal=14) + + b, a = cheby1(4, 1, [0.4, 0.7], btype='band') + assert_array_almost_equal(b, [0.0084, 0, -0.0335, 0, 0.0502, 0, + -0.0335, 0, 0.0084], decimal=4) + assert_array_almost_equal(a, [1.0, 1.1191, 2.862, 2.2986, 3.4137, + 1.8653, 1.8982, 0.5676, 0.4103], + decimal=4) + + b2, a2 = cheby1(5, 3, 1, analog=True) + assert_array_almost_equal(b2, [0.0626], decimal=4) + assert_array_almost_equal(a2, [1, 0.5745, 1.4150, 0.5489, 0.4080, + 0.0626], decimal=4) + + b, a = cheby1(8, 0.5, 0.1) + assert_array_almost_equal(b, 1.0e-006 * np.array([ + 0.00703924326028, 0.05631394608227, 0.19709881128793, + 0.39419762257586, 0.49274702821983, 0.39419762257586, + 0.19709881128793, 0.05631394608227, 0.00703924326028]), + decimal=13) + assert_array_almost_equal(a, [ + 1.00000000000000, -7.44912258934158, 24.46749067762108, + -46.27560200466141, 55.11160187999928, -42.31640010161038, + 20.45543300484147, -5.69110270561444, 0.69770374759022], + decimal=13) + + b, a = cheby1(8, 0.5, 0.25) + assert_array_almost_equal(b, 1.0e-003 * np.array([ + 0.00895261138923, 0.07162089111382, 0.25067311889837, + 0.50134623779673, 0.62668279724591, 0.50134623779673, + 
0.25067311889837, 0.07162089111382, 0.00895261138923]), + decimal=13) + assert_array_almost_equal(a, [1.00000000000000, -5.97529229188545, + 16.58122329202101, -27.71423273542923, + 30.39509758355313, -22.34729670426879, + 10.74509800434910, -3.08924633697497, + 0.40707685889802], decimal=13) + + def test_highpass(self): + # high even order + z, p, k = cheby1(24, 0.7, 0.2, 'high', output='zpk') + z2 = np.ones(24) + p2 = [-6.136558509657073e-01 + 2.700091504942893e-01j, + -6.136558509657073e-01 - 2.700091504942893e-01j, + -3.303348340927516e-01 + 6.659400861114254e-01j, + -3.303348340927516e-01 - 6.659400861114254e-01j, + 8.779713780557169e-03 + 8.223108447483040e-01j, + 8.779713780557169e-03 - 8.223108447483040e-01j, + 2.742361123006911e-01 + 8.356666951611864e-01j, + 2.742361123006911e-01 - 8.356666951611864e-01j, + 4.562984557158206e-01 + 7.954276912303594e-01j, + 4.562984557158206e-01 - 7.954276912303594e-01j, + 5.777335494123628e-01 + 7.435821817961783e-01j, + 5.777335494123628e-01 - 7.435821817961783e-01j, + 6.593260977749194e-01 + 6.955390907990932e-01j, + 6.593260977749194e-01 - 6.955390907990932e-01j, + 7.149590948466562e-01 + 6.559437858502012e-01j, + 7.149590948466562e-01 - 6.559437858502012e-01j, + 7.532432388188739e-01 + 6.256158042292060e-01j, + 7.532432388188739e-01 - 6.256158042292060e-01j, + 7.794365244268271e-01 + 6.042099234813333e-01j, + 7.794365244268271e-01 - 6.042099234813333e-01j, + 7.967253874772997e-01 + 5.911966597313203e-01j, + 7.967253874772997e-01 - 5.911966597313203e-01j, + 8.069756417293870e-01 + 5.862214589217275e-01j, + 8.069756417293870e-01 - 5.862214589217275e-01j] + k2 = 6.190427617192018e-04 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-10) + xp_assert_close(k, k2, rtol=1e-10) + + # high odd order + z, p, k = cheby1(23, 0.8, 0.3, 'high', output='zpk') + z2 = np.ones(23) + p2 = [-7.676400532011010e-01, + -6.754621070166477e-01 + 3.970502605619561e-01j, + -6.754621070166477e-01 - 3.970502605619561e-01j, + -4.528880018446727e-01 + 6.844061483786332e-01j, + -4.528880018446727e-01 - 6.844061483786332e-01j, + -1.986009130216447e-01 + 8.382285942941594e-01j, + -1.986009130216447e-01 - 8.382285942941594e-01j, + 2.504673931532608e-02 + 8.958137635794080e-01j, + 2.504673931532608e-02 - 8.958137635794080e-01j, + 2.001089429976469e-01 + 9.010678290791480e-01j, + 2.001089429976469e-01 - 9.010678290791480e-01j, + 3.302410157191755e-01 + 8.835444665962544e-01j, + 3.302410157191755e-01 - 8.835444665962544e-01j, + 4.246662537333661e-01 + 8.594054226449009e-01j, + 4.246662537333661e-01 - 8.594054226449009e-01j, + 4.919620928120296e-01 + 8.366772762965786e-01j, + 4.919620928120296e-01 - 8.366772762965786e-01j, + 5.385746917494749e-01 + 8.191616180796720e-01j, + 5.385746917494749e-01 - 8.191616180796720e-01j, + 5.855636993537203e-01 + 8.060680937701062e-01j, + 5.855636993537203e-01 - 8.060680937701062e-01j, + 5.688812849391721e-01 + 8.086497795114683e-01j, + 5.688812849391721e-01 - 8.086497795114683e-01j] + k2 = 1.941697029206324e-05 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-10) + xp_assert_close(k, k2, rtol=1e-10) + + z, p, k = cheby1(10, 1, 1000, 'high', analog=True, output='zpk') + z2 = np.zeros(10) + p2 = [-3.144743169501551e+03 + 3.511680029092744e+03j, + -3.144743169501551e+03 - 3.511680029092744e+03j, + -5.633065604514602e+02 + 2.023615191183945e+03j, + -5.633065604514602e+02 - 2.023615191183945e+03j, + -1.946412183352025e+02 + 1.372309454274755e+03j, 
+ -1.946412183352025e+02 - 1.372309454274755e+03j, + -7.987162953085479e+01 + 1.105207708045358e+03j, + -7.987162953085479e+01 - 1.105207708045358e+03j, + -2.250315039031946e+01 + 1.001723931471477e+03j, + -2.250315039031946e+01 - 1.001723931471477e+03j] + k2 = 8.912509381337453e-01 + xp_assert_equal(z, z2) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-15) + + def test_bandpass(self): + z, p, k = cheby1(8, 1, [0.3, 0.4], 'bp', output='zpk') + z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1] + p2 = [3.077784854851463e-01 + 9.453307017592942e-01j, + 3.077784854851463e-01 - 9.453307017592942e-01j, + 3.280567400654425e-01 + 9.272377218689016e-01j, + 3.280567400654425e-01 - 9.272377218689016e-01j, + 3.677912763284301e-01 + 9.038008865279966e-01j, + 3.677912763284301e-01 - 9.038008865279966e-01j, + 4.194425632520948e-01 + 8.769407159656157e-01j, + 4.194425632520948e-01 - 8.769407159656157e-01j, + 4.740921994669189e-01 + 8.496508528630974e-01j, + 4.740921994669189e-01 - 8.496508528630974e-01j, + 5.234866481897429e-01 + 8.259608422808477e-01j, + 5.234866481897429e-01 - 8.259608422808477e-01j, + 5.844717632289875e-01 + 8.052901363500210e-01j, + 5.844717632289875e-01 - 8.052901363500210e-01j, + 5.615189063336070e-01 + 8.100667803850766e-01j, + 5.615189063336070e-01 - 8.100667803850766e-01j] + k2 = 5.007028718074307e-09 + xp_assert_equal(z, z2, check_dtype=False) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-13) + + def test_bandstop(self): + z, p, k = cheby1(7, 1, [0.5, 0.6], 'stop', output='zpk') + z2 = [-1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j] + p2 = [-8.942974551472813e-02 + 3.482480481185926e-01j, + -8.942974551472813e-02 - 3.482480481185926e-01j, + 1.293775154041798e-01 + 8.753499858081858e-01j, + 1.293775154041798e-01 - 8.753499858081858e-01j, + 3.399741945062013e-02 + 9.690316022705607e-01j, + 3.399741945062013e-02 - 9.690316022705607e-01j, + 4.167225522796539e-04 + 9.927338161087488e-01j, + 4.167225522796539e-04 - 9.927338161087488e-01j, + -3.912966549550960e-01 + 8.046122859255742e-01j, + -3.912966549550960e-01 - 8.046122859255742e-01j, + -3.307805547127368e-01 + 9.133455018206508e-01j, + -3.307805547127368e-01 - 9.133455018206508e-01j, + -3.072658345097743e-01 + 9.443589759799366e-01j, + -3.072658345097743e-01 - 9.443589759799366e-01j] + k2 = 3.619438310405028e-01 + xp_assert_close(sorted(z, key=np.imag), + sorted(z2, key=np.imag), rtol=1e-13) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + xp_assert_close(k, k2, rtol=0, atol=5e-16) + + def test_ba_output(self): + # with transfer function conversion, without digital conversion + b, a = cheby1(5, 0.9, [210, 310], 'stop', analog=True) + b2 = [1.000000000000006e+00, 0, + 3.255000000000020e+05, 0, + 
4.238010000000026e+10, 0, + 2.758944510000017e+15, 0, + 8.980364380050052e+19, 0, + 1.169243442282517e+24 + ] + a2 = [1.000000000000000e+00, 4.630555945694342e+02, + 4.039266454794788e+05, 1.338060988610237e+08, + 5.844333551294591e+10, 1.357346371637638e+13, + 3.804661141892782e+15, 5.670715850340080e+17, + 1.114411200988328e+20, 8.316815934908471e+21, + 1.169243442282517e+24 + ] + xp_assert_close(b, b2, rtol=1e-14) + xp_assert_close(a, a2, rtol=1e-14) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = cheby1(N, 1, fc, btype, fs=fs) + ba2 = cheby1(N, 1, fc/(fs/2), btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = cheby1(N, 1, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = cheby1(N, 1, fcnorm, btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + +class TestCheby2: + + def test_degenerate(self): + # 0-order filter is just a passthrough + # Stopband ripple factor doesn't matter + b, a = cheby2(0, 123.456, 1, analog=True) + xp_assert_equal(b, np.asarray([1.0])) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = cheby2(1, 10*np.log10(2), 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = cheby2(1, 50, 0.3, output='zpk') + xp_assert_equal(z, np.asarray([-1], dtype=np.complex128)) + xp_assert_close(p, [9.967826460175649e-01 + 0j], rtol=1e-14) + xp_assert_close(k, 1.608676991217512e-03, rtol=1e-14) + + def test_basic(self): + for N in range(25): + wn = 0.01 + z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk') + assert len(p) == N + assert all(np.real(p) <= 0) # No poles in right half of S-plane + + for N in range(25): + wn = 0.01 + z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk') + assert all(np.abs(p) <= 1) # No poles outside unit circle + + B, A = cheby2(18, 100, 0.5) + assert_array_almost_equal(B, [ + 0.00167583914216, 0.01249479541868, 0.05282702120282, + 0.15939804265706, 0.37690207631117, 0.73227013789108, + 1.20191856962356, 1.69522872823393, 2.07598674519837, + 2.21972389625291, 2.07598674519838, 1.69522872823395, + 1.20191856962359, 0.73227013789110, 0.37690207631118, + 0.15939804265707, 0.05282702120282, 0.01249479541868, + 0.00167583914216], decimal=13) + assert_array_almost_equal(A, [ + 1.00000000000000, -0.27631970006174, 3.19751214254060, + -0.15685969461355, 4.13926117356269, 0.60689917820044, + 2.95082770636540, 0.89016501910416, 1.32135245849798, + 0.51502467236824, 0.38906643866660, 0.15367372690642, + 0.07255803834919, 0.02422454070134, 0.00756108751837, + 0.00179848550988, 0.00033713574499, 0.00004258794833, + 0.00000281030149], decimal=13) + + def test_highpass(self): + # high even order + z, p, k = cheby2(26, 60, 0.3, 'high', output='zpk') + z2 = [9.981088955489852e-01 + 6.147058341984388e-02j, + 9.981088955489852e-01 - 6.147058341984388e-02j, + 9.832702870387426e-01 + 1.821525257215483e-01j, + 9.832702870387426e-01 - 1.821525257215483e-01j, + 9.550760158089112e-01 + 2.963609353922882e-01j, + 9.550760158089112e-01 - 2.963609353922882e-01j, + 9.162054748821922e-01 + 4.007087817803773e-01j, + 9.162054748821922e-01 - 4.007087817803773e-01j, + 8.700619897368064e-01 + 4.929423232136168e-01j, + 8.700619897368064e-01 - 4.929423232136168e-01j, + 
5.889791753434985e-01 + 8.081482110427953e-01j, + 5.889791753434985e-01 - 8.081482110427953e-01j, + 5.984900456570295e-01 + 8.011302423760501e-01j, + 5.984900456570295e-01 - 8.011302423760501e-01j, + 6.172880888914629e-01 + 7.867371958365343e-01j, + 6.172880888914629e-01 - 7.867371958365343e-01j, + 6.448899971038180e-01 + 7.642754030030161e-01j, + 6.448899971038180e-01 - 7.642754030030161e-01j, + 6.804845629637927e-01 + 7.327624168637228e-01j, + 6.804845629637927e-01 - 7.327624168637228e-01j, + 8.202619107108660e-01 + 5.719881098737678e-01j, + 8.202619107108660e-01 - 5.719881098737678e-01j, + 7.228410452536148e-01 + 6.910143437705678e-01j, + 7.228410452536148e-01 - 6.910143437705678e-01j, + 7.702121399578629e-01 + 6.377877856007792e-01j, + 7.702121399578629e-01 - 6.377877856007792e-01j] + p2 = [7.365546198286450e-01 + 4.842085129329526e-02j, + 7.365546198286450e-01 - 4.842085129329526e-02j, + 7.292038510962885e-01 + 1.442201672097581e-01j, + 7.292038510962885e-01 - 1.442201672097581e-01j, + 7.151293788040354e-01 + 2.369925800458584e-01j, + 7.151293788040354e-01 - 2.369925800458584e-01j, + 6.955051820787286e-01 + 3.250341363856910e-01j, + 6.955051820787286e-01 - 3.250341363856910e-01j, + 6.719122956045220e-01 + 4.070475750638047e-01j, + 6.719122956045220e-01 - 4.070475750638047e-01j, + 6.461722130611300e-01 + 4.821965916689270e-01j, + 6.461722130611300e-01 - 4.821965916689270e-01j, + 5.528045062872224e-01 + 8.162920513838372e-01j, + 5.528045062872224e-01 - 8.162920513838372e-01j, + 5.464847782492791e-01 + 7.869899955967304e-01j, + 5.464847782492791e-01 - 7.869899955967304e-01j, + 5.488033111260949e-01 + 7.520442354055579e-01j, + 5.488033111260949e-01 - 7.520442354055579e-01j, + 6.201874719022955e-01 + 5.500894392527353e-01j, + 6.201874719022955e-01 - 5.500894392527353e-01j, + 5.586478152536709e-01 + 7.112676877332921e-01j, + 5.586478152536709e-01 - 7.112676877332921e-01j, + 5.958145844148228e-01 + 6.107074340842115e-01j, + 5.958145844148228e-01 - 6.107074340842115e-01j, + 5.747812938519067e-01 + 6.643001536914696e-01j, + 5.747812938519067e-01 - 6.643001536914696e-01j] + k2 = 9.932997786497189e-02 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-12) + xp_assert_close(k, k2, rtol=1e-11) + + # high odd order + z, p, k = cheby2(25, 80, 0.5, 'high', output='zpk') + z2 = [9.690690376586687e-01 + 2.467897896011971e-01j, + 9.690690376586687e-01 - 2.467897896011971e-01j, + 9.999999999999492e-01, + 8.835111277191199e-01 + 4.684101698261429e-01j, + 8.835111277191199e-01 - 4.684101698261429e-01j, + 7.613142857900539e-01 + 6.483830335935022e-01j, + 7.613142857900539e-01 - 6.483830335935022e-01j, + 6.232625173626231e-01 + 7.820126817709752e-01j, + 6.232625173626231e-01 - 7.820126817709752e-01j, + 4.864456563413621e-01 + 8.737108351316745e-01j, + 4.864456563413621e-01 - 8.737108351316745e-01j, + 3.618368136816749e-01 + 9.322414495530347e-01j, + 3.618368136816749e-01 - 9.322414495530347e-01j, + 2.549486883466794e-01 + 9.669545833752675e-01j, + 2.549486883466794e-01 - 9.669545833752675e-01j, + 1.676175432109457e-01 + 9.858520980390212e-01j, + 1.676175432109457e-01 - 9.858520980390212e-01j, + 1.975218468277521e-03 + 9.999980492540941e-01j, + 1.975218468277521e-03 - 9.999980492540941e-01j, + 1.786959496651858e-02 + 9.998403260399917e-01j, + 1.786959496651858e-02 - 9.998403260399917e-01j, + 9.967933660557139e-02 + 9.950196127985684e-01j, + 9.967933660557139e-02 - 9.950196127985684e-01j, + 
5.013970951219547e-02 + 9.987422137518890e-01j, + 5.013970951219547e-02 - 9.987422137518890e-01j] + p2 = [4.218866331906864e-01, + 4.120110200127552e-01 + 1.361290593621978e-01j, + 4.120110200127552e-01 - 1.361290593621978e-01j, + 3.835890113632530e-01 + 2.664910809911026e-01j, + 3.835890113632530e-01 - 2.664910809911026e-01j, + 3.399195570456499e-01 + 3.863983538639875e-01j, + 3.399195570456499e-01 - 3.863983538639875e-01j, + 2.855977834508353e-01 + 4.929444399540688e-01j, + 2.855977834508353e-01 - 4.929444399540688e-01j, + 2.255765441339322e-01 + 5.851631870205766e-01j, + 2.255765441339322e-01 - 5.851631870205766e-01j, + 1.644087535815792e-01 + 6.637356937277153e-01j, + 1.644087535815792e-01 - 6.637356937277153e-01j, + -7.293633845273095e-02 + 9.739218252516307e-01j, + -7.293633845273095e-02 - 9.739218252516307e-01j, + 1.058259206358626e-01 + 7.304739464862978e-01j, + 1.058259206358626e-01 - 7.304739464862978e-01j, + -5.703971947785402e-02 + 9.291057542169088e-01j, + -5.703971947785402e-02 - 9.291057542169088e-01j, + 5.263875132656864e-02 + 7.877974334424453e-01j, + 5.263875132656864e-02 - 7.877974334424453e-01j, + -3.007943405982616e-02 + 8.846331716180016e-01j, + -3.007943405982616e-02 - 8.846331716180016e-01j, + 6.857277464483946e-03 + 8.383275456264492e-01j, + 6.857277464483946e-03 - 8.383275456264492e-01j] + k2 = 6.507068761705037e-03 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-12) + xp_assert_close(k, k2, rtol=1e-11) + + def test_bandpass(self): + z, p, k = cheby2(9, 40, [0.07, 0.2], 'pass', output='zpk') + z2 = [-9.999999999999999e-01, + 3.676588029658514e-01 + 9.299607543341383e-01j, + 3.676588029658514e-01 - 9.299607543341383e-01j, + 7.009689684982283e-01 + 7.131917730894889e-01j, + 7.009689684982283e-01 - 7.131917730894889e-01j, + 7.815697973765858e-01 + 6.238178033919218e-01j, + 7.815697973765858e-01 - 6.238178033919218e-01j, + 8.063793628819866e-01 + 5.913986160941200e-01j, + 8.063793628819866e-01 - 5.913986160941200e-01j, + 1.000000000000001e+00, + 9.944493019920448e-01 + 1.052168511576739e-01j, + 9.944493019920448e-01 - 1.052168511576739e-01j, + 9.854674703367308e-01 + 1.698642543566085e-01j, + 9.854674703367308e-01 - 1.698642543566085e-01j, + 9.762751735919308e-01 + 2.165335665157851e-01j, + 9.762751735919308e-01 - 2.165335665157851e-01j, + 9.792277171575134e-01 + 2.027636011479496e-01j, + 9.792277171575134e-01 - 2.027636011479496e-01j] + p2 = [8.143803410489621e-01 + 5.411056063397541e-01j, + 8.143803410489621e-01 - 5.411056063397541e-01j, + 7.650769827887418e-01 + 5.195412242095543e-01j, + 7.650769827887418e-01 - 5.195412242095543e-01j, + 6.096241204063443e-01 + 3.568440484659796e-01j, + 6.096241204063443e-01 - 3.568440484659796e-01j, + 6.918192770246239e-01 + 4.770463577106911e-01j, + 6.918192770246239e-01 - 4.770463577106911e-01j, + 6.986241085779207e-01 + 1.146512226180060e-01j, + 6.986241085779207e-01 - 1.146512226180060e-01j, + 8.654645923909734e-01 + 1.604208797063147e-01j, + 8.654645923909734e-01 - 1.604208797063147e-01j, + 9.164831670444591e-01 + 1.969181049384918e-01j, + 9.164831670444591e-01 - 1.969181049384918e-01j, + 9.630425777594550e-01 + 2.317513360702271e-01j, + 9.630425777594550e-01 - 2.317513360702271e-01j, + 9.438104703725529e-01 + 2.193509900269860e-01j, + 9.438104703725529e-01 - 2.193509900269860e-01j] + k2 = 9.345352824659604e-03 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + 
xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-11) + + def test_bandstop(self): + z, p, k = cheby2(6, 55, [0.1, 0.9], 'stop', output='zpk') + z2 = [6.230544895101009e-01 + 7.821784343111114e-01j, + 6.230544895101009e-01 - 7.821784343111114e-01j, + 9.086608545660115e-01 + 4.175349702471991e-01j, + 9.086608545660115e-01 - 4.175349702471991e-01j, + 9.478129721465802e-01 + 3.188268649763867e-01j, + 9.478129721465802e-01 - 3.188268649763867e-01j, + -6.230544895100982e-01 + 7.821784343111109e-01j, + -6.230544895100982e-01 - 7.821784343111109e-01j, + -9.086608545660116e-01 + 4.175349702472088e-01j, + -9.086608545660116e-01 - 4.175349702472088e-01j, + -9.478129721465784e-01 + 3.188268649763897e-01j, + -9.478129721465784e-01 - 3.188268649763897e-01j] + p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j, + -9.464094036167638e-01 - 1.720048695084344e-01j, + -8.715844103386737e-01 + 1.370665039509297e-01j, + -8.715844103386737e-01 - 1.370665039509297e-01j, + -8.078751204586425e-01 + 5.729329866682983e-02j, + -8.078751204586425e-01 - 5.729329866682983e-02j, + 9.464094036167665e-01 + 1.720048695084332e-01j, + 9.464094036167665e-01 - 1.720048695084332e-01j, + 8.078751204586447e-01 + 5.729329866683007e-02j, + 8.078751204586447e-01 - 5.729329866683007e-02j, + 8.715844103386721e-01 + 1.370665039509331e-01j, + 8.715844103386721e-01 - 1.370665039509331e-01j] + k2 = 2.917823332763358e-03 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-13) + xp_assert_close(k, k2, rtol=1e-11) + + def test_ba_output(self): + # with transfer function conversion, without digital conversion + b, a = cheby2(5, 20, [2010, 2100], 'stop', True) + b2 = [1.000000000000000e+00, 0, # Matlab: 6.683253076978249e-12, + 2.111512500000000e+07, 0, # Matlab: 1.134325604589552e-04, + 1.782966433781250e+14, 0, # Matlab: 7.216787944356781e+02, + 7.525901316990656e+20, 0, # Matlab: 2.039829265789886e+09, + 1.587960565565748e+27, 0, # Matlab: 2.161236218626134e+15, + 1.339913493808585e+33] + a2 = [1.000000000000000e+00, 1.849550755473371e+02, + 2.113222918998538e+07, 3.125114149732283e+09, + 1.785133457155609e+14, 1.979158697776348e+16, + 7.535048322653831e+20, 5.567966191263037e+22, + 1.589246884221346e+27, 5.871210648525566e+28, + 1.339913493808590e+33] + xp_assert_close(b, b2, rtol=1e-14) + xp_assert_close(a, a2, rtol=1e-14) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = cheby2(N, 20, fc, btype, fs=fs) + ba2 = cheby2(N, 20, fc/(fs/2), btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = cheby2(N, 20, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = cheby2(N, 20, fcnorm, btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + +class TestEllip: + + def test_degenerate(self): + # 0-order filter is just a passthrough + # Even-order filters have DC gain of -rp dB + # Stopband ripple factor doesn't matter + b, a = ellip(0, 10*np.log10(2), 123.456, 1, analog=True) + assert_array_almost_equal(b, [1/np.sqrt(2)]) + xp_assert_equal(a, np.asarray([1.0])) + + # 1-order filter is same for all types + b, a = ellip(1, 10*np.log10(2), 1, 1, analog=True) + 
assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = ellip(1, 1, 55, 0.3, output='zpk') + xp_assert_close(z, [-9.999999999999998e-01], rtol=1e-14) + xp_assert_close(p, [-6.660721153525525e-04], rtol=1e-10) + xp_assert_close(k, 5.003330360576763e-01, rtol=1e-14) + + def test_basic(self): + for N in range(25): + wn = 0.01 + z, p, k = ellip(N, 1, 40, wn, 'low', analog=True, output='zpk') + assert len(p) == N + assert all(np.real(p) <= 0) # No poles in right half of S-plane + + for N in range(25): + wn = 0.01 + z, p, k = ellip(N, 1, 40, wn, 'high', analog=False, output='zpk') + assert all(np.abs(p) <= 1) # No poles outside unit circle + + b3, a3 = ellip(5, 3, 26, 1, analog=True) + assert_array_almost_equal(b3, [0.1420, 0, 0.3764, 0, + 0.2409], decimal=4) + assert_array_almost_equal(a3, [1, 0.5686, 1.8061, 0.8017, 0.8012, + 0.2409], decimal=4) + + b, a = ellip(3, 1, 60, [0.4, 0.7], 'stop') + assert_array_almost_equal(b, [0.3310, 0.3469, 1.1042, 0.7044, 1.1042, + 0.3469, 0.3310], decimal=4) + assert_array_almost_equal(a, [1.0000, 0.6973, 1.1441, 0.5878, 0.7323, + 0.1131, -0.0060], decimal=4) + + def test_highpass(self): + # high even order + z, p, k = ellip(24, 1, 80, 0.3, 'high', output='zpk') + z2 = [9.761875332501075e-01 + 2.169283290099910e-01j, + 9.761875332501075e-01 - 2.169283290099910e-01j, + 8.413503353963494e-01 + 5.404901600661900e-01j, + 8.413503353963494e-01 - 5.404901600661900e-01j, + 7.160082576305009e-01 + 6.980918098681732e-01j, + 7.160082576305009e-01 - 6.980918098681732e-01j, + 6.456533638965329e-01 + 7.636306264739803e-01j, + 6.456533638965329e-01 - 7.636306264739803e-01j, + 6.127321820971366e-01 + 7.902906256703928e-01j, + 6.127321820971366e-01 - 7.902906256703928e-01j, + 5.983607817490196e-01 + 8.012267936512676e-01j, + 5.983607817490196e-01 - 8.012267936512676e-01j, + 5.922577552594799e-01 + 8.057485658286990e-01j, + 5.922577552594799e-01 - 8.057485658286990e-01j, + 5.896952092563588e-01 + 8.076258788449631e-01j, + 5.896952092563588e-01 - 8.076258788449631e-01j, + 5.886248765538837e-01 + 8.084063054565607e-01j, + 5.886248765538837e-01 - 8.084063054565607e-01j, + 5.881802711123132e-01 + 8.087298490066037e-01j, + 5.881802711123132e-01 - 8.087298490066037e-01j, + 5.879995719101164e-01 + 8.088612386766461e-01j, + 5.879995719101164e-01 - 8.088612386766461e-01j, + 5.879354086709576e-01 + 8.089078780868164e-01j, + 5.879354086709576e-01 - 8.089078780868164e-01j] + p2 = [-3.184805259081650e-01 + 4.206951906775851e-01j, + -3.184805259081650e-01 - 4.206951906775851e-01j, + 1.417279173459985e-01 + 7.903955262836452e-01j, + 1.417279173459985e-01 - 7.903955262836452e-01j, + 4.042881216964651e-01 + 8.309042239116594e-01j, + 4.042881216964651e-01 - 8.309042239116594e-01j, + 5.128964442789670e-01 + 8.229563236799665e-01j, + 5.128964442789670e-01 - 8.229563236799665e-01j, + 5.569614712822724e-01 + 8.155957702908510e-01j, + 5.569614712822724e-01 - 8.155957702908510e-01j, + 5.750478870161392e-01 + 8.118633973883931e-01j, + 5.750478870161392e-01 - 8.118633973883931e-01j, + 5.825314018170804e-01 + 8.101960910679270e-01j, + 5.825314018170804e-01 - 8.101960910679270e-01j, + 5.856397379751872e-01 + 8.094825218722543e-01j, + 5.856397379751872e-01 - 8.094825218722543e-01j, + 5.869326035251949e-01 + 8.091827531557583e-01j, + 5.869326035251949e-01 - 8.091827531557583e-01j, + 5.874697218855733e-01 + 8.090593298213502e-01j, + 5.874697218855733e-01 - 8.090593298213502e-01j, + 5.876904783532237e-01 + 8.090127161018823e-01j, + 5.876904783532237e-01 - 
8.090127161018823e-01j, + 5.877753105317594e-01 + 8.090050577978136e-01j, + 5.877753105317594e-01 - 8.090050577978136e-01j] + k2 = 4.918081266957108e-02 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-4) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-4) + xp_assert_close(k, k2, rtol=1e-3) + + # high odd order + z, p, k = ellip(23, 1, 70, 0.5, 'high', output='zpk') + z2 = [9.999999999998661e-01, + 6.603717261750994e-01 + 7.509388678638675e-01j, + 6.603717261750994e-01 - 7.509388678638675e-01j, + 2.788635267510325e-01 + 9.603307416968041e-01j, + 2.788635267510325e-01 - 9.603307416968041e-01j, + 1.070215532544218e-01 + 9.942567008268131e-01j, + 1.070215532544218e-01 - 9.942567008268131e-01j, + 4.049427369978163e-02 + 9.991797705105507e-01j, + 4.049427369978163e-02 - 9.991797705105507e-01j, + 1.531059368627931e-02 + 9.998827859909265e-01j, + 1.531059368627931e-02 - 9.998827859909265e-01j, + 5.808061438534933e-03 + 9.999831330689181e-01j, + 5.808061438534933e-03 - 9.999831330689181e-01j, + 2.224277847754599e-03 + 9.999975262909676e-01j, + 2.224277847754599e-03 - 9.999975262909676e-01j, + 8.731857107534554e-04 + 9.999996187732845e-01j, + 8.731857107534554e-04 - 9.999996187732845e-01j, + 3.649057346914968e-04 + 9.999999334218996e-01j, + 3.649057346914968e-04 - 9.999999334218996e-01j, + 1.765538109802615e-04 + 9.999999844143768e-01j, + 1.765538109802615e-04 - 9.999999844143768e-01j, + 1.143655290967426e-04 + 9.999999934602630e-01j, + 1.143655290967426e-04 - 9.999999934602630e-01j] + p2 = [-6.322017026545028e-01, + -4.648423756662754e-01 + 5.852407464440732e-01j, + -4.648423756662754e-01 - 5.852407464440732e-01j, + -2.249233374627773e-01 + 8.577853017985717e-01j, + -2.249233374627773e-01 - 8.577853017985717e-01j, + -9.234137570557621e-02 + 9.506548198678851e-01j, + -9.234137570557621e-02 - 9.506548198678851e-01j, + -3.585663561241373e-02 + 9.821494736043981e-01j, + -3.585663561241373e-02 - 9.821494736043981e-01j, + -1.363917242312723e-02 + 9.933844128330656e-01j, + -1.363917242312723e-02 - 9.933844128330656e-01j, + -5.131505238923029e-03 + 9.975221173308673e-01j, + -5.131505238923029e-03 - 9.975221173308673e-01j, + -1.904937999259502e-03 + 9.990680819857982e-01j, + -1.904937999259502e-03 - 9.990680819857982e-01j, + -6.859439885466834e-04 + 9.996492201426826e-01j, + -6.859439885466834e-04 - 9.996492201426826e-01j, + -2.269936267937089e-04 + 9.998686250679161e-01j, + -2.269936267937089e-04 - 9.998686250679161e-01j, + -5.687071588789117e-05 + 9.999527573294513e-01j, + -5.687071588789117e-05 - 9.999527573294513e-01j, + -6.948417068525226e-07 + 9.999882737700173e-01j, + -6.948417068525226e-07 - 9.999882737700173e-01j] + k2 = 1.220910020289434e-02 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-4) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-4) + xp_assert_close(k, k2, rtol=1e-3) + + def test_bandpass(self): + z, p, k = ellip(7, 1, 40, [0.07, 0.2], 'pass', output='zpk') + z2 = [-9.999999999999991e-01, + 6.856610961780020e-01 + 7.279209168501619e-01j, + 6.856610961780020e-01 - 7.279209168501619e-01j, + 7.850346167691289e-01 + 6.194518952058737e-01j, + 7.850346167691289e-01 - 6.194518952058737e-01j, + 7.999038743173071e-01 + 6.001281461922627e-01j, + 7.999038743173071e-01 - 6.001281461922627e-01j, + 9.999999999999999e-01, + 9.862938983554124e-01 + 1.649980183725925e-01j, + 9.862938983554124e-01 - 1.649980183725925e-01j, + 9.788558330548762e-01 + 2.045513580850601e-01j, + 
9.788558330548762e-01 - 2.045513580850601e-01j, + 9.771155231720003e-01 + 2.127093189691258e-01j, + 9.771155231720003e-01 - 2.127093189691258e-01j] + p2 = [8.063992755498643e-01 + 5.858071374778874e-01j, + 8.063992755498643e-01 - 5.858071374778874e-01j, + 8.050395347071724e-01 + 5.639097428109795e-01j, + 8.050395347071724e-01 - 5.639097428109795e-01j, + 8.113124936559144e-01 + 4.855241143973142e-01j, + 8.113124936559144e-01 - 4.855241143973142e-01j, + 8.665595314082394e-01 + 3.334049560919331e-01j, + 8.665595314082394e-01 - 3.334049560919331e-01j, + 9.412369011968871e-01 + 2.457616651325908e-01j, + 9.412369011968871e-01 - 2.457616651325908e-01j, + 9.679465190411238e-01 + 2.228772501848216e-01j, + 9.679465190411238e-01 - 2.228772501848216e-01j, + 9.747235066273385e-01 + 2.178937926146544e-01j, + 9.747235066273385e-01 - 2.178937926146544e-01j] + k2 = 8.354782670263239e-03 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-4) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-4) + xp_assert_close(k, k2, rtol=1e-3) + + z, p, k = ellip(5, 1, 75, [90.5, 110.5], 'pass', True, 'zpk') + z2 = [-5.583607317695175e-14 + 1.433755965989225e+02j, + -5.583607317695175e-14 - 1.433755965989225e+02j, + 5.740106416459296e-14 + 1.261678754570291e+02j, + 5.740106416459296e-14 - 1.261678754570291e+02j, + -2.199676239638652e-14 + 6.974861996895196e+01j, + -2.199676239638652e-14 - 6.974861996895196e+01j, + -3.372595657044283e-14 + 7.926145989044531e+01j, + -3.372595657044283e-14 - 7.926145989044531e+01j, + 0] + p2 = [-8.814960004852743e-01 + 1.104124501436066e+02j, + -8.814960004852743e-01 - 1.104124501436066e+02j, + -2.477372459140184e+00 + 1.065638954516534e+02j, + -2.477372459140184e+00 - 1.065638954516534e+02j, + -3.072156842945799e+00 + 9.995404870405324e+01j, + -3.072156842945799e+00 - 9.995404870405324e+01j, + -2.180456023925693e+00 + 9.379206865455268e+01j, + -2.180456023925693e+00 - 9.379206865455268e+01j, + -7.230484977485752e-01 + 9.056598800801140e+01j, + -7.230484977485752e-01 - 9.056598800801140e+01j] + k2 = 3.774571622827070e-02 + xp_assert_close(sorted(z, key=np.imag), + sorted(z2, key=np.imag), rtol=1e-4) + xp_assert_close(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-6) + xp_assert_close(k, k2, rtol=1e-3) + + def test_bandstop(self): + z, p, k = ellip(8, 1, 65, [0.2, 0.4], 'stop', output='zpk') + z2 = [3.528578094286510e-01 + 9.356769561794296e-01j, + 3.528578094286510e-01 - 9.356769561794296e-01j, + 3.769716042264783e-01 + 9.262248159096587e-01j, + 3.769716042264783e-01 - 9.262248159096587e-01j, + 4.406101783111199e-01 + 8.976985411420985e-01j, + 4.406101783111199e-01 - 8.976985411420985e-01j, + 5.539386470258847e-01 + 8.325574907062760e-01j, + 5.539386470258847e-01 - 8.325574907062760e-01j, + 6.748464963023645e-01 + 7.379581332490555e-01j, + 6.748464963023645e-01 - 7.379581332490555e-01j, + 7.489887970285254e-01 + 6.625826604475596e-01j, + 7.489887970285254e-01 - 6.625826604475596e-01j, + 7.913118471618432e-01 + 6.114127579150699e-01j, + 7.913118471618432e-01 - 6.114127579150699e-01j, + 7.806804740916381e-01 + 6.249303940216475e-01j, + 7.806804740916381e-01 - 6.249303940216475e-01j] + + p2 = [-1.025299146693730e-01 + 5.662682444754943e-01j, + -1.025299146693730e-01 - 5.662682444754943e-01j, + 1.698463595163031e-01 + 8.926678667070186e-01j, + 1.698463595163031e-01 - 8.926678667070186e-01j, + 2.750532687820631e-01 + 9.351020170094005e-01j, + 2.750532687820631e-01 - 9.351020170094005e-01j, + 3.070095178909486e-01 + 
9.457373499553291e-01j, + 3.070095178909486e-01 - 9.457373499553291e-01j, + 7.695332312152288e-01 + 2.792567212705257e-01j, + 7.695332312152288e-01 - 2.792567212705257e-01j, + 8.083818999225620e-01 + 4.990723496863960e-01j, + 8.083818999225620e-01 - 4.990723496863960e-01j, + 8.066158014414928e-01 + 5.649811440393374e-01j, + 8.066158014414928e-01 - 5.649811440393374e-01j, + 8.062787978834571e-01 + 5.855780880424964e-01j, + 8.062787978834571e-01 - 5.855780880424964e-01j] + k2 = 2.068622545291259e-01 + xp_assert_close(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-6) + xp_assert_close(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-5) + xp_assert_close(k, k2, rtol=1e-5) + + def test_ba_output(self): + # with transfer function conversion, without digital conversion + b, a = ellip(5, 1, 40, [201, 240], 'stop', True) + b2 = [ + 1.000000000000000e+00, 0, # Matlab: 1.743506051190569e-13, + 2.426561778314366e+05, 0, # Matlab: 3.459426536825722e-08, + 2.348218683400168e+10, 0, # Matlab: 2.559179747299313e-03, + 1.132780692872241e+15, 0, # Matlab: 8.363229375535731e+01, + 2.724038554089566e+19, 0, # Matlab: 1.018700994113120e+06, + 2.612380874940186e+23 + ] + a2 = [ + 1.000000000000000e+00, 1.337266601804649e+02, + 2.486725353510667e+05, 2.628059713728125e+07, + 2.436169536928770e+10, 1.913554568577315e+12, + 1.175208184614438e+15, 6.115751452473410e+16, + 2.791577695211466e+19, 7.241811142725384e+20, + 2.612380874940182e+23 + ] + xp_assert_close(b, b2, rtol=1e-6) + xp_assert_close(a, a2, rtol=1e-4) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = ellip(N, 1, 20, fc, btype, fs=fs) + ba2 = ellip(N, 1, 20, fc/(fs/2), btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = ellip(N, 1, 20, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = ellip(N, 1, 20, fcnorm, btype) + for ba1_, ba2_ in zip(ba1, ba2): + xp_assert_close(ba1_, ba2_) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + iirnotch(0.06, 30, fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none"): + iirnotch(0.06, 30, fs=None) + + +def test_sos_consistency(): + # Consistency checks of output='sos' for the specialized IIR filter + # design functions. 
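+ # A hedged sketch of the consistency exercised below (nothing in this + # comment is executed; butter stands in for any of the design functions): + # + # b, a = butter(2, 0.1, output='ba') + # sos = butter(2, 0.1, output='sos') + # # expect sos to be a single row equal to np.hstack((b, a)); for + # # orders 3 and 4, expect sos == zpk2sos(*butter(N, 0.1, output='zpk'))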
+ design_funcs = [(bessel, (0.1,)), + (butter, (0.1,)), + (cheby1, (45.0, 0.1)), + (cheby2, (0.087, 0.1)), + (ellip, (0.087, 45, 0.1))] + for func, args in design_funcs: + name = func.__name__ + + b, a = func(2, *args, output='ba') + sos = func(2, *args, output='sos') + xp_assert_close(sos, [np.hstack((b, a))], err_msg=f"{name}(2,...)") + + zpk = func(3, *args, output='zpk') + sos = func(3, *args, output='sos') + xp_assert_close(sos, zpk2sos(*zpk), err_msg=f"{name}(3,...)") + + zpk = func(4, *args, output='zpk') + sos = func(4, *args, output='sos') + xp_assert_close(sos, zpk2sos(*zpk), err_msg=f"{name}(4,...)") + + +class TestIIRNotch: + + def test_ba_output(self): + # Compare coefficients with Matlab ones + # for the equivalent input: + b, a = iirnotch(0.06, 30) + b2 = [ + 9.9686824e-01, -1.9584219e+00, + 9.9686824e-01 + ] + a2 = [ + 1.0000000e+00, -1.9584219e+00, + 9.9373647e-01 + ] + + xp_assert_close(b, b2, rtol=1e-8) + xp_assert_close(a, a2, rtol=1e-8) + + def test_frequency_response(self): + # Get filter coefficients + b, a = iirnotch(0.3, 30) + + # Get frequency response + w, h = freqz(b, a, 1000) + + # Pick 5 point + p = [200, # w0 = 0.200 + 295, # w0 = 0.295 + 300, # w0 = 0.300 + 305, # w0 = 0.305 + 400] # w0 = 0.400 + + # Get frequency response correspondent to each of those points + hp = h[p] + + # Check if the frequency response fulfill the specifications: + # hp[0] and hp[4] correspond to frequencies distant from + # w0 = 0.3 and should be close to 1 + xp_assert_close(abs(hp[0]), np.asarray(1.), rtol=1e-2, check_0d=False) + xp_assert_close(abs(hp[4]), np.asarray(1.), rtol=1e-2, check_0d=False) + + # hp[1] and hp[3] correspond to frequencies approximately + # on the edges of the passband and should be close to -3dB + xp_assert_close(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2) + xp_assert_close(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2) + + # hp[2] correspond to the frequency that should be removed + # the frequency response should be very close to 0 + xp_assert_close(abs(hp[2]), np.asarray(0.0), atol=1e-10, check_0d=False) + + def test_errors(self): + # Exception should be raised if w0 > 1 or w0 <0 + assert_raises(ValueError, iirnotch, w0=2, Q=30) + assert_raises(ValueError, iirnotch, w0=-1, Q=30) + + # Exception should be raised if any of the parameters + # are not float (or cannot be converted to one) + assert_raises(ValueError, iirnotch, w0="blabla", Q=30) + assert_raises(TypeError, iirnotch, w0=-1, Q=[1, 2, 3]) + + def test_fs_param(self): + # Get filter coefficients + b, a = iirnotch(1500, 30, fs=10000) + + # Get frequency response + w, h = freqz(b, a, 1000, fs=10000) + + # Pick 5 point + p = [200, # w0 = 1000 + 295, # w0 = 1475 + 300, # w0 = 1500 + 305, # w0 = 1525 + 400] # w0 = 2000 + + # Get frequency response correspondent to each of those points + hp = h[p] + + # Check if the frequency response fulfill the specifications: + # hp[0] and hp[4] correspond to frequencies distant from + # w0 = 1500 and should be close to 1 + xp_assert_close(abs(hp[0]), np.ones_like(abs(hp[0])), rtol=1e-2, + check_0d=False) + xp_assert_close(abs(hp[4]), np.ones_like(abs(hp[4])), rtol=1e-2, + check_0d=False) + + # hp[1] and hp[3] correspond to frequencies approximately + # on the edges of the passband and should be close to -3dB + xp_assert_close(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2) + xp_assert_close(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2) + + # hp[2] correspond to the frequency that should be removed + # the frequency response should be very close to 0 + xp_assert_close(abs(hp[2]), np.asarray(0.0), 
atol=1e-10, check_0d=False) + + +class TestIIRPeak: + + def test_ba_output(self): + # Compare coefficients with Matlab ones + # for the equivalent input: + b, a = iirpeak(0.06, 30) + b2 = [ + 3.131764229e-03, 0, + -3.131764229e-03 + ] + a2 = [ + 1.0000000e+00, -1.958421917e+00, + 9.9373647e-01 + ] + xp_assert_close(b, b2, rtol=1e-8) + xp_assert_close(a, a2, rtol=1e-8) + + def test_frequency_response(self): + # Get filter coefficients + b, a = iirpeak(0.3, 30) + + # Get frequency response + w, h = freqz(b, a, 1000) + + # Pick 5 point + p = [30, # w0 = 0.030 + 295, # w0 = 0.295 + 300, # w0 = 0.300 + 305, # w0 = 0.305 + 800] # w0 = 0.800 + + # Get frequency response correspondent to each of those points + hp = h[p] + + # Check if the frequency response fulfill the specifications: + # hp[0] and hp[4] correspond to frequencies distant from + # w0 = 0.3 and should be close to 0 + xp_assert_close(abs(hp[0]), + np.zeros_like(abs(hp[0])), atol=1e-2, check_0d=False) + xp_assert_close(abs(hp[4]), + np.zeros_like(abs(hp[4])), atol=1e-2, check_0d=False) + + # hp[1] and hp[3] correspond to frequencies approximately + # on the edges of the passband and should be close to 10**(-3/20) + xp_assert_close(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2) + xp_assert_close(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2) + + # hp[2] correspond to the frequency that should be retained and + # the frequency response should be very close to 1 + xp_assert_close(abs(hp[2]), np.asarray(1.0), rtol=1e-10, check_0d=False) + + def test_errors(self): + # Exception should be raised if w0 > 1 or w0 <0 + assert_raises(ValueError, iirpeak, w0=2, Q=30) + assert_raises(ValueError, iirpeak, w0=-1, Q=30) + + # Exception should be raised if any of the parameters + # are not float (or cannot be converted to one) + assert_raises(ValueError, iirpeak, w0="blabla", Q=30) + assert_raises(TypeError, iirpeak, w0=-1, Q=[1, 2, 3]) + + def test_fs_param(self): + # Get filter coefficients + b, a = iirpeak(1200, 30, fs=8000) + + # Get frequency response + w, h = freqz(b, a, 1000, fs=8000) + + # Pick 5 point + p = [30, # w0 = 120 + 295, # w0 = 1180 + 300, # w0 = 1200 + 305, # w0 = 1220 + 800] # w0 = 3200 + + # Get frequency response correspondent to each of those points + hp = h[p] + + # Check if the frequency response fulfill the specifications: + # hp[0] and hp[4] correspond to frequencies distant from + # w0 = 1200 and should be close to 0 + xp_assert_close(abs(hp[0]), + np.zeros_like(abs(hp[0])), atol=1e-2, check_0d=False) + xp_assert_close(abs(hp[4]), + np.zeros_like(abs(hp[4])), atol=1e-2, check_0d=False) + + # hp[1] and hp[3] correspond to frequencies approximately + # on the edges of the passband and should be close to 10**(-3/20) + xp_assert_close(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2) + xp_assert_close(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2) + + # hp[2] correspond to the frequency that should be retained and + # the frequency response should be very close to 1 + xp_assert_close(abs(hp[2]), + np.ones_like(abs(hp[2])), rtol=1e-10, check_0d=False) + + +class TestIIRComb: + # Test erroneous input cases + def test_invalid_input(self): + # w0 is <= 0 or >= fs / 2 + fs = 1000 + for args in [(-fs, 30), (0, 35), (fs / 2, 40), (fs, 35)]: + with pytest.raises(ValueError, match='w0 must be between '): + iircomb(*args, fs=fs) + + # fs is not divisible by w0 + for args in [(120, 30), (157, 35)]: + with pytest.raises(ValueError, match='fs must be divisible '): + iircomb(*args, fs=fs) + + # https://github.com/scipy/scipy/issues/14043#issuecomment-1107349140 + # Previously, 
fs=44100, w0=49.999 was rejected, but fs=2, + # w0=49.999/int(44100/2) was accepted. Now it is rejected, too. + with pytest.raises(ValueError, match='fs must be divisible '): + iircomb(w0=49.999/int(44100/2), Q=30) + + with pytest.raises(ValueError, match='fs must be divisible '): + iircomb(w0=49.999, Q=30, fs=44100) + + # Filter type is not notch or peak + for args in [(0.2, 30, 'natch'), (0.5, 35, 'comb')]: + with pytest.raises(ValueError, match='ftype must be '): + iircomb(*args) + + # Verify that the filter's frequency response contains a + # notch at the cutoff frequency + @pytest.mark.parametrize('ftype', ('notch', 'peak')) + def test_frequency_response(self, ftype): + # Create a notching or peaking comb filter at 1000 Hz + b, a = iircomb(1000, 30, ftype=ftype, fs=10000) + + # Compute the frequency response + freqs, response = freqz(b, a, 1000, fs=10000) + + # Find the notch using argrelextrema + comb_points = argrelextrema(abs(response), np.less)[0] + + # Verify that the first notch sits at 1000 Hz + comb1 = comb_points[0] + xp_assert_close(freqs[comb1], np.asarray(1000.), check_0d=False) + + # Verify pass_zero parameter + @pytest.mark.parametrize('ftype,pass_zero,peak,notch', + [('peak', True, 123.45, 61.725), + ('peak', False, 61.725, 123.45), + ('peak', None, 61.725, 123.45), + ('notch', None, 61.725, 123.45), + ('notch', True, 123.45, 61.725), + ('notch', False, 61.725, 123.45)]) + def test_pass_zero(self, ftype, pass_zero, peak, notch): + # Create a notching or peaking comb filter + b, a = iircomb(123.45, 30, ftype=ftype, fs=1234.5, pass_zero=pass_zero) + + # Compute the frequency response + freqs, response = freqz(b, a, [peak, notch], fs=1234.5) + + # Verify that expected notches are notches and peaks are peaks + assert abs(response[0]) > 0.99 + assert abs(response[1]) < 1e-10 + + # All built-in IIR filters are real, so should have perfectly + # symmetrical poles and zeros. Then ba representation (using + # numpy.poly) will be purely real instead of having negligible + # imaginary parts. + def test_iir_symmetry(self): + b, a = iircomb(400, 30, fs=24000) + z, p, k = tf2zpk(b, a) + xp_assert_equal(sorted(z), sorted(z.conj())) + xp_assert_equal(sorted(p), sorted(p.conj())) + xp_assert_equal(k, np.real(k)) + + assert issubclass(b.dtype.type, np.floating) + assert issubclass(a.dtype.type, np.floating) + + # Verify filter coefficients with MATLAB's iircomb function + def test_ba_output(self): + b_notch, a_notch = iircomb(60, 35, ftype='notch', fs=600) + b_notch2 = [0.957020174408697, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -0.957020174408697] + a_notch2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -0.914040348817395] + xp_assert_close(b_notch, b_notch2) + xp_assert_close(a_notch, a_notch2) + + b_peak, a_peak = iircomb(60, 35, ftype='peak', fs=600) + b_peak2 = [0.0429798255913026, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, -0.0429798255913026] + a_peak2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.914040348817395] + xp_assert_close(b_peak, b_peak2) + xp_assert_close(a_peak, a_peak2) + + # Verify that https://github.com/scipy/scipy/issues/14043 is fixed + def test_nearest_divisor(self): + # Create a notching comb filter + b, a = iircomb(50/int(44100/2), 50.0, ftype='notch') + + # Compute the frequency response at an upper harmonic of 50 + freqs, response = freqz(b, a, [22000], fs=44100) + + # Before bug fix, this would produce N = 881, so that 22 kHz was ~0 dB. 
+ # Now N = 882 correctly and 22 kHz should be a notch <-220 dB + assert abs(response[0]) < 1e-10 + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + iircomb(1000, 30, fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none"): + iircomb(1000, 30, fs=None) + + +class TestIIRDesign: + + def test_exceptions(self): + with pytest.raises(ValueError, match="the same shape"): + iirdesign(0.2, [0.1, 0.3], 1, 40) + with pytest.raises(ValueError, match="the same shape"): + iirdesign(np.array([[0.3, 0.6], [0.3, 0.6]]), + np.array([[0.4, 0.5], [0.4, 0.5]]), 1, 40) + + # discrete filter with non-positive frequency + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(0, 0.5, 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(-0.1, 0.5, 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(0.1, 0, 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(0.1, -0.5, 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0, 0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([-0.1, 0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, -0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [0, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [-0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [0.1, 0], 1, 40) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [0.1, -0.5], 1, 40) + + # analog filter with negative frequency + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(-0.1, 0.5, 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign(0.1, -0.5, 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([-0.1, 0.3], [0.1, 0.5], 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, -0.3], [0.1, 0.5], 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [-0.1, 0.5], 1, 40, analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirdesign([0.1, 0.3], [0.1, -0.5], 1, 40, analog=True) + + # discrete filter with fs=None, freq > 1 + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign(1, 0.5, 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign(1.1, 0.5, 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign(0.1, 1, 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign(0.1, 1.5, 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([1, 0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([1.1, 0.3], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 1], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 1.1], [0.1, 0.5], 1, 40) + with pytest.raises(ValueError, 
match="must be less than 1"): + iirdesign([0.1, 0.3], [1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 0.3], [1.1, 0.5], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 0.3], [0.1, 1], 1, 40) + with pytest.raises(ValueError, match="must be less than 1"): + iirdesign([0.1, 0.3], [0.1, 1.5], 1, 40) + + # discrete filter with fs>2, wp, ws < fs/2 must pass + iirdesign(100, 500, 1, 40, fs=2000) + iirdesign(500, 100, 1, 40, fs=2000) + iirdesign([200, 400], [100, 500], 1, 40, fs=2000) + iirdesign([100, 500], [200, 400], 1, 40, fs=2000) + + # discrete filter with fs>2, freq > fs/2: this must raise + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign(1000, 400, 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign(1100, 500, 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign(100, 1000, 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign(100, 1100, 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([1000, 400], [100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([1100, 400], [100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 1000], [100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 1100], [100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 400], [1000, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 400], [1100, 500], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 400], [100, 1000], 1, 40, fs=2000) + with pytest.raises(ValueError, match="must be less than fs/2"): + iirdesign([200, 400], [100, 1100], 1, 40, fs=2000) + + with pytest.raises(ValueError, match="strictly inside stopband"): + iirdesign([0.1, 0.4], [0.5, 0.6], 1, 40) + with pytest.raises(ValueError, match="strictly inside stopband"): + iirdesign([0.5, 0.6], [0.1, 0.4], 1, 40) + with pytest.raises(ValueError, match="strictly inside stopband"): + iirdesign([0.3, 0.6], [0.4, 0.7], 1, 40) + with pytest.raises(ValueError, match="strictly inside stopband"): + iirdesign([0.4, 0.7], [0.3, 0.6], 1, 40) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + iirfilter(1, 1, btype="low", fs=np.array([10, 20])) + + +class TestIIRFilter: + + def test_symmetry(self): + # All built-in IIR filters are real, so should have perfectly + # symmetrical poles and zeros. Then ba representation (using + # numpy.poly) will be purely real instead of having negligible + # imaginary parts. 
+ for N in np.arange(1, 26): + for ftype in ('butter', 'bessel', 'cheby1', 'cheby2', 'ellip'): + z, p, k = iirfilter(N, 1.1, 1, 20, 'low', analog=True, + ftype=ftype, output='zpk') + xp_assert_equal(sorted(z), + sorted(z.conj())) + xp_assert_equal(sorted(p), + sorted(p.conj())) + xp_assert_equal(k, np.real(k)) + + b, a = iirfilter(N, 1.1, 1, 20, 'low', analog=True, + ftype=ftype, output='ba') + assert issubclass(b.dtype.type, np.floating) + assert issubclass(a.dtype.type, np.floating) + + def test_int_inputs(self): + # Using integer frequency arguments and large N should not produce + # numpy integers that wraparound to negative numbers + k = iirfilter(24, 100, btype='low', analog=True, ftype='bessel', + output='zpk')[2] + k2 = 9.999999999999989e+47 + xp_assert_close(np.asarray(k), np.asarray(k2)) + # if fs is specified then the normalization of Wn to have + # 0 <= Wn <= 1 should not cause an integer overflow + # the following line should not raise an exception + iirfilter(20, [1000000000, 1100000000], btype='bp', + analog=False, fs=6250000000) + + def test_invalid_wn_size(self): + # low and high have 1 Wn, band and stop have 2 Wn + assert_raises(ValueError, iirfilter, 1, [0.1, 0.9], btype='low') + assert_raises(ValueError, iirfilter, 1, [0.2, 0.5], btype='high') + assert_raises(ValueError, iirfilter, 1, 0.2, btype='bp') + assert_raises(ValueError, iirfilter, 1, 400, btype='bs', analog=True) + + def test_invalid_wn_range(self): + # For digital filters, 0 <= Wn <= 1 + assert_raises(ValueError, iirfilter, 1, 2, btype='low') + assert_raises(ValueError, iirfilter, 1, [0.5, 1], btype='band') + assert_raises(ValueError, iirfilter, 1, [0., 0.5], btype='band') + assert_raises(ValueError, iirfilter, 1, -1, btype='high') + assert_raises(ValueError, iirfilter, 1, [1, 2], btype='band') + assert_raises(ValueError, iirfilter, 1, [10, 20], btype='stop') + + # analog=True with non-positive critical frequencies + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, 0, btype='low', analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, -1, btype='low', analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, [0, 100], analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, [-1, 100], analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, [10, 0], analog=True) + with pytest.raises(ValueError, match="must be greater than 0"): + iirfilter(2, [10, -1], analog=True) + + def test_analog_sos(self): + # first order Butterworth filter with Wn = 1 has tf 1/(s+1) + sos = [[0., 0., 1., 0., 1., 1.]] + sos2 = iirfilter(N=1, Wn=1, btype='low', analog=True, output='sos') + assert_array_almost_equal(sos, sos2) + + def test_wn1_ge_wn0(self): + # gh-15773: should raise error if Wn[0] >= Wn[1] + with pytest.raises(ValueError, + match=r"Wn\[0\] must be less than Wn\[1\]"): + iirfilter(2, [0.5, 0.5]) + with pytest.raises(ValueError, + match=r"Wn\[0\] must be less than Wn\[1\]"): + iirfilter(2, [0.6, 0.5]) + + +class TestGroupDelay: + def test_identity_filter(self): + w, gd = group_delay((1, 1)) + assert_array_almost_equal(w, pi * np.arange(512) / 512) + assert_array_almost_equal(gd, np.zeros(512)) + w, gd = group_delay((1, 1), whole=True) + assert_array_almost_equal(w, 2 * pi * np.arange(512) / 512) + assert_array_almost_equal(gd, np.zeros(512)) + + def test_fir(self): + # Let's design linear phase FIR and check that the group delay + # is constant. 
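+ # A linear-phase FIR filter with numtaps coefficients delays every + # frequency component by (numtaps - 1) / 2 samples, so the 101-tap design + # below should show a flat group delay of 50 samples. Hedged sketch of + # the check (it mirrors the code that follows): + # + # b = firwin(101, 0.1) + # w, gd = group_delay((b, 1)) # expect gd ~= 50 at every frequency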
+ N = 100 + b = firwin(N + 1, 0.1) + w, gd = group_delay((b, 1)) + xp_assert_close(gd, np.ones_like(gd)*(0.5 * N)) + + def test_iir(self): + # Let's design a Butterworth filter and test the group delay at + # some points against the MATLAB answer. + b, a = butter(4, 0.1) + w = np.linspace(0, pi, num=10, endpoint=False) + w, gd = group_delay((b, a), w=w) + matlab_gd = np.array([8.249313898506037, 11.958947880907104, + 2.452325615326005, 1.048918665702008, + 0.611382575635897, 0.418293269460578, + 0.317932917836572, 0.261371844762525, + 0.229038045801298, 0.212185774208521]) + assert_array_almost_equal(gd, matlab_gd) + + @pytest.mark.thread_unsafe + def test_singular(self): + # Let's create a filter with zeros and poles on the unit circle and + # check if warnings are raised at those frequencies. + z1 = np.exp(1j * 0.1 * pi) + z2 = np.exp(1j * 0.25 * pi) + p1 = np.exp(1j * 0.5 * pi) + p2 = np.exp(1j * 0.8 * pi) + b = np.convolve([1, -z1], [1, -z2]) + a = np.convolve([1, -p1], [1, -p2]) + w = np.array([0.1 * pi, 0.25 * pi, -0.5 * pi, -0.8 * pi]) + + w, gd = assert_warns(UserWarning, group_delay, (b, a), w=w) + + def test_backward_compat(self): + # For backward compatibility, test if None acts as a wrapper for the default + w1, gd1 = group_delay((1, 1)) + w2, gd2 = group_delay((1, 1), None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(gd1, gd2) + + def test_fs_param(self): + # Let's design a Butterworth filter and test the group delay at + # some points against the normalized frequency answer. + b, a = butter(4, 4800, fs=96000) + w = np.linspace(0, 96000/2, num=10, endpoint=False) + w, gd = group_delay((b, a), w=w, fs=96000) + norm_gd = np.array([8.249313898506037, 11.958947880907104, + 2.452325615326005, 1.048918665702008, + 0.611382575635897, 0.418293269460578, + 0.317932917836572, 0.261371844762525, + 0.229038045801298, 0.212185774208521]) + assert_array_almost_equal(gd, norm_gd) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + w, gd = group_delay((1, 1), N) + assert_array_almost_equal(w, pi * np.arange(8) / 8) + assert_array_almost_equal(gd, np.zeros(8)) + + # Measure at frequency 8 rad/sec + for w in (8.0, 8.0+0j): + w_out, gd = group_delay((1, 1), w) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(gd, [0]) + + def test_complex_coef(self): + # gh-19586: handle complex coef TFs + # + # for g(z) = (alpha*z+1)/(z+conjugate(alpha)), the group delay is + # given by the function below. + # + # def gd_expr(w, alpha): + # num = 1j*(abs(alpha)**2-1)*np.exp(1j*w) + # den = (alpha*np.exp(1j*w)+1)*(np.exp(1j*w)+np.conj(alpha)) + # return -np.imag(num/den) + + # arbitrary non-real alpha + alpha = -0.6143077933232609+0.3355978770229421j + # 8 points from -pi to pi + wref = np.array([-3.141592653589793 , + -2.356194490192345 , + -1.5707963267948966, + -0.7853981633974483, + 0. , + 0.7853981633974483, + 1.5707963267948966, + 2.356194490192345 ]) + gdref = array([0.18759548150354619, + 0.17999770352712252, + 0.23598047471879877, + 0.46539443069907194, + 1.9511492420564165 , + 3.478129975138865 , + 0.6228594960517333 , + 0.27067831839471224]) + b = [alpha,1] + a = [1, np.conjugate(alpha)] + gdtest = group_delay((b,a), wref)[1] + # need nulp=14 for macOS arm64 wheel builds; added 2 for some + # robustness on other platforms.
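+ # assert_array_almost_equal_nulp compares in units in the last place: it + # requires abs(x - y) <= nulp * np.spacing(max(abs(x), abs(y))). Near 1.0, + # np.spacing(1.0) is about 2.2e-16, so nulp=16 tolerates a difference of + # roughly 3.6e-15 (a hedged reading of the tolerance).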
+ assert_array_almost_equal_nulp(gdtest, gdref, nulp=16) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + group_delay((1, 1), fs=np.array([10, 20])) + + with pytest.raises(ValueError, match="Sampling.*be none"): + group_delay((1, 1), fs=None) + + +class TestGammatone: + # Test erroneous input cases. + def test_invalid_input(self): + # Cutoff frequency is <= 0 or >= fs / 2. + fs = 16000 + for args in [(-fs, 'iir'), (0, 'fir'), (fs / 2, 'iir'), (fs, 'fir')]: + with pytest.raises(ValueError, match='The frequency must be ' + 'between '): + gammatone(*args, fs=fs) + + # Filter type is not fir or iir + for args in [(440, 'fie'), (220, 'it')]: + with pytest.raises(ValueError, match='ftype must be '): + gammatone(*args, fs=fs) + + # Order is <= 0 or > 24 for FIR filter. + for args in [(440, 'fir', -50), (220, 'fir', 0), (110, 'fir', 25), + (55, 'fir', 50)]: + with pytest.raises(ValueError, match='Invalid order: '): + gammatone(*args, numtaps=None, fs=fs) + + # Verify that the filter's frequency response is approximately + # 1 at the cutoff frequency. + def test_frequency_response(self): + fs = 16000 + ftypes = ['fir', 'iir'] + for ftype in ftypes: + # Create a gammatone filter centered at 1000 Hz. + b, a = gammatone(1000, ftype, fs=fs) + + # Calculate the frequency response. + freqs, response = freqz(b, a) + + # Determine peak magnitude of the response + # and corresponding frequency. + response_max = np.max(np.abs(response)) + freq_hz = freqs[np.argmax(np.abs(response))] / ((2 * np.pi) / fs) + + # Check that the peak magnitude is 1 and the frequency is 1000 Hz. + xp_assert_close(response_max, + np.ones_like(response_max), rtol=1e-2, check_0d=False) + xp_assert_close(freq_hz, + 1000*np.ones_like(freq_hz), rtol=1e-2, check_0d=False) + + # All built-in IIR filters are real, so should have perfectly + # symmetrical poles and zeros. Then ba representation (using + # numpy.poly) will be purely real instead of having negligible + # imaginary parts. 
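+ # Hedged sketch of the check performed in the next test: the ba output is + # converted back to zpk form with tf2zpk, where conjugate symmetry is + # straightforward to verify: + # + # z, p, k = tf2zpk(*gammatone(440, 'iir', fs=24000)) + # # expect sorted(z) == sorted(z.conj()) and sorted(p) == sorted(p.conj())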
+ def test_iir_symmetry(self): + b, a = gammatone(440, 'iir', fs=24000) + z, p, k = tf2zpk(b, a) + xp_assert_equal(sorted(z), sorted(z.conj())) + xp_assert_equal(sorted(p), sorted(p.conj())) + xp_assert_equal(k, np.real(k)) + + assert issubclass(b.dtype.type, np.floating) + assert issubclass(a.dtype.type, np.floating) + + # Verify FIR filter coefficients with the paper's + # Mathematica implementation + def test_fir_ba_output(self): + b, _ = gammatone(15, 'fir', fs=1000) + b2 = [0.0, 2.2608075649884e-04, + 1.5077903981357e-03, 4.2033687753998e-03, + 8.1508962726503e-03, 1.2890059089154e-02, + 1.7833890391666e-02, 2.2392613558564e-02, + 2.6055195863104e-02, 2.8435872863284e-02, + 2.9293319149544e-02, 2.852976858014e-02, + 2.6176557156294e-02, 2.2371510270395e-02, + 1.7332485267759e-02] + xp_assert_close(b, b2) + + # Verify IIR filter coefficients with the paper's MATLAB implementation + def test_iir_ba_output(self): + b, a = gammatone(440, 'iir', fs=16000) + b2 = [1.31494461367464e-06, -5.03391196645395e-06, + 7.00649426000897e-06, -4.18951968419854e-06, + 9.02614910412011e-07] + a2 = [1.0, -7.65646235454218, + 25.7584699322366, -49.7319214483238, + 60.2667361289181, -46.9399590980486, + 22.9474798808461, -6.43799381299034, + 0.793651554625368] + xp_assert_close(b, b2) + xp_assert_close(a, a2) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + gammatone(440, 'iir', fs=np.array([10, 20])) + + +class TestOrderFilter: + def test_doc_example(self): + x = np.arange(25).reshape(5, 5) + domain = np.identity(3) + + # minimum of elements 1,3,9 (zero-padded) on phone pad + # 7,5,3 on numpad + expected = np.array( + [[0., 0., 0., 0., 0.], + [0., 0., 1., 2., 0.], + [0., 5., 6., 7., 0.], + [0., 10., 11., 12., 0.], + [0., 0., 0., 0., 0.]], + ) + xp_assert_close(order_filter(x, domain, 0), expected, check_dtype=False) + + # maximum of elements 1,3,9 (zero-padded) on phone pad + # 7,5,3 on numpad + expected = np.array( + [[6., 7., 8., 9., 4.], + [11., 12., 13., 14., 9.], + [16., 17., 18., 19., 14.], + [21., 22., 23., 24., 19.], + [20., 21., 22., 23., 24.]], + ) + xp_assert_close(order_filter(x, domain, 2), expected, check_dtype=False) + + # and, just to complete the set, median of zero-padded elements + expected = np.array( + [[0, 1, 2, 3, 0], + [5, 6, 7, 8, 3], + [10, 11, 12, 13, 8], + [15, 16, 17, 18, 13], + [0, 15, 16, 17, 18]], + ) + xp_assert_close(order_filter(x, domain, 1), expected) + + def test_medfilt_order_filter(self): + x = np.arange(25).reshape(5, 5) + + # median of zero-padded elements 1,5,9 on phone pad + # 7,5,3 on numpad + expected = np.array( + [[0, 1, 2, 3, 0], + [1, 6, 7, 8, 4], + [6, 11, 12, 13, 9], + [11, 16, 17, 18, 14], + [0, 16, 17, 18, 0]], + ) + xp_assert_close(medfilt(x, 3), expected) + + xp_assert_close( + order_filter(x, np.ones((3, 3)), 4), + expected + ) + + def test_order_filter_asymmetric(self): + x = np.arange(25).reshape(5, 5) + domain = np.array( + [[1, 1, 0], + [0, 1, 0], + [0, 0, 0]], + ) + + expected = np.array( + [[0, 0, 0, 0, 0], + [0, 0, 1, 2, 3], + [0, 5, 6, 7, 8], + [0, 10, 11, 12, 13], + [0, 15, 16, 17, 18]] + ) + xp_assert_close(order_filter(x, domain, 0), expected) + + expected = np.array( + [[0, 0, 0, 0, 0], + [0, 1, 2, 3, 4], + [5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]] + ) + xp_assert_close(order_filter(x, domain, 1), expected) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_fir_filter_design.py 
b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_fir_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..cd2b60e63cdcb36e39c6775fa99394d123c2956b --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_fir_filter_design.py @@ -0,0 +1,654 @@ +import numpy as np +from numpy.testing import assert_warns +from scipy._lib._array_api import ( + xp_assert_close, xp_assert_equal, + assert_almost_equal, assert_array_almost_equal, +) +from pytest import raises as assert_raises +import pytest + +from scipy.fft import fft +from scipy.special import sinc +from scipy.signal import (kaiser_beta, kaiser_atten, kaiserord, + firwin, firwin2, freqz, remez, firls, minimum_phase +) + + +def test_kaiser_beta(): + b = kaiser_beta(58.7) + assert_almost_equal(b, 0.1102 * 50.0) + b = kaiser_beta(22.0) + assert_almost_equal(b, 0.5842 + 0.07886) + b = kaiser_beta(21.0) + assert b == 0.0 + b = kaiser_beta(10.0) + assert b == 0.0 + + +def test_kaiser_atten(): + a = kaiser_atten(1, 1.0) + assert a == 7.95 + a = kaiser_atten(2, 1/np.pi) + assert a == 2.285 + 7.95 + + +def test_kaiserord(): + assert_raises(ValueError, kaiserord, 1.0, 1.0) + numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi) + assert (numtaps, beta) == (2, 0.0) + + +class TestFirwin: + + def check_response(self, h, expected_response, tol=.05): + N = len(h) + alpha = 0.5 * (N-1) + m = np.arange(0,N) - alpha # time indices of taps + for freq, expected in expected_response: + actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq))) + mse = abs(actual-expected)**2 + assert mse < tol, f'response not as expected, mse={mse:g} > {tol:g}' + + def test_response(self): + N = 51 + f = .5 + # increase length just to try even/odd + h = firwin(N, f) # low-pass from 0 to f + self.check_response(h, [(.25,1), (.75,0)]) + + h = firwin(N+1, f, window='nuttall') # specific window + self.check_response(h, [(.25,1), (.75,0)]) + + h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass + self.check_response(h, [(.25,0), (.75,1)]) + + f1, f2, f3, f4 = .2, .4, .6, .8 + h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter + self.check_response(h, [(.1,0), (.3,1), (.5,0)]) + + h = firwin(N+4, [f1, f2]) # band-stop filter + self.check_response(h, [(.1,1), (.3,0), (.5,1)]) + + h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False) + self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)]) + + h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter + self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)]) + + h = firwin(N+7, 0.1, width=.03) # low-pass + self.check_response(h, [(.05,1), (.75,0)]) + + h = firwin(N+8, 0.1, pass_zero=False) # high-pass + self.check_response(h, [(.05,0), (.75,1)]) + + def mse(self, h, bands): + """Compute mean squared error versus ideal response across frequency + band. 
+ h -- coefficients + bands -- list of (left, right) tuples relative to 1==Nyquist of + passbands + """ + w, H = freqz(h, worN=1024) + f = w/np.pi + passIndicator = np.zeros(len(w), bool) + for left, right in bands: + passIndicator |= (f >= left) & (f < right) + Hideal = np.where(passIndicator, 1, 0) + mse = np.mean(abs(abs(H)-Hideal)**2) + return mse + + def test_scaling(self): + """ + For one lowpass, bandpass, and highpass example filter, this test + checks two things: + - the mean squared error over the frequency domain of the unscaled + filter is smaller than the scaled filter (true for rectangular + window) + - the response of the scaled filter is exactly unity at the center + of the first passband + """ + N = 11 + cases = [ + ([.5], True, (0, 1)), + ([0.2, .6], False, (.4, 1)), + ([.5], False, (1, 1)), + ] + for cutoff, pass_zero, expected_response in cases: + h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones') + hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones') + if len(cutoff) == 1: + if pass_zero: + cutoff = [0] + cutoff + else: + cutoff = cutoff + [1] + msg = 'least squares violation' + assert self.mse(h, [cutoff]) < self.mse(hs, [cutoff]), msg + self.check_response(hs, [expected_response], 1e-12) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + firwin(51, .5, fs=np.array([10, 20])) + + +class TestFirWinMore: + """Different author, different style, different tests...""" + + def test_lowpass(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False) + taps = firwin(ntaps, **kwargs) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) + + taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs) + xp_assert_close(taps, taps_str) + + def test_highpass(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + + # Ensure that ntaps is odd. + ntaps |= 1 + + kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False) + taps = firwin(ntaps, pass_zero=False, **kwargs) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) + + taps_str = firwin(ntaps, pass_zero='highpass', **kwargs) + xp_assert_close(taps, taps_str) + + def test_bandpass(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + kwargs = dict(cutoff=[0.3, 0.7], window=('kaiser', beta), scale=False) + taps = firwin(ntaps, pass_zero=False, **kwargs) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. 
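+ # The probe frequencies straddle each transition band: for a band edge fc + # and transition width `width`, the gain should be ~0 at fc - width/2 + # (stopband side) and ~1 at fc + width/2 (passband side) for this + # band-pass design. Hedged sketch of a single probe: + # + # _, resp = freqz(taps, worN=np.pi * np.array([0.3 + width / 2])) + # # expect abs(resp) close to 1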
+ freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5, + 0.7-width/2, 0.7+width/2, 0.8, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) + + taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs) + xp_assert_close(taps, taps_str) + + def test_bandstop_multi(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + kwargs = dict(cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta), + scale=False) + taps = firwin(ntaps, **kwargs) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35, + 0.5-width/2, 0.5+width/2, 0.65, + 0.8-width/2, 0.8+width/2, 0.9, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], + decimal=5) + + taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs) + xp_assert_close(taps, taps_str) + + def test_fs_nyq(self): + """Test the fs and nyq keywords.""" + nyquist = 1000 + width = 40.0 + relative_width = width/nyquist + ntaps, beta = kaiserord(120, relative_width) + taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta), + pass_zero=False, scale=False, fs=2*nyquist) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where + # we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500, + 700-width/2, 700+width/2, 800, 1000]) + freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) + + def test_bad_cutoff(self): + """Test that invalid cutoff argument raises ValueError.""" + # cutoff values must be greater than 0 and less than 1. + assert_raises(ValueError, firwin, 99, -0.5) + assert_raises(ValueError, firwin, 99, 1.5) + # Don't allow 0 or 1 in cutoff. + assert_raises(ValueError, firwin, 99, [0, 0.5]) + assert_raises(ValueError, firwin, 99, [0.5, 1]) + # cutoff values must be strictly increasing. + assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2]) + assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5]) + # Must have at least one cutoff value. + assert_raises(ValueError, firwin, 99, []) + # 2D array not allowed. + assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]]) + # cutoff values must be less than nyq. + assert_raises(ValueError, firwin, 99, 50.0, fs=80) + assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50) + + def test_even_highpass_raises_value_error(self): + """Test that attempt to create a highpass filter with an even number + of taps raises a ValueError exception.""" + assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False) + assert_raises(ValueError, firwin, 40, [.25, 0.5]) + + def test_bad_pass_zero(self): + """Test degenerate pass_zero cases.""" + with assert_raises(ValueError, match='pass_zero must be'): + firwin(41, 0.5, pass_zero='foo') + with assert_raises(TypeError, match='cannot be interpreted'): + firwin(41, 0.5, pass_zero=1.) 
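+ # For contrast, pass_zero also accepts the strings 'lowpass', 'highpass', + # 'bandpass' and 'bandstop' besides True/False; hedged sketch of a valid + # call (not asserted here): + # + # firwin(41, [0.5, 0.6], pass_zero='bandpass') # two cutoffs -> band-pass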
+ for pass_zero in ('lowpass', 'highpass'): + with assert_raises(ValueError, match='cutoff must have one'): + firwin(41, [0.5, 0.6], pass_zero=pass_zero) + for pass_zero in ('bandpass', 'bandstop'): + with assert_raises(ValueError, match='must have at least two'): + firwin(41, [0.5], pass_zero=pass_zero) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + firwin2(51, .5, 1, fs=np.array([10, 20])) + + +class TestFirwin2: + + def test_invalid_args(self): + # `freq` and `gain` have different lengths. + with assert_raises(ValueError, match='must be of same length'): + firwin2(50, [0, 0.5, 1], [0.0, 1.0]) + # `nfreqs` is less than `ntaps`. + with assert_raises(ValueError, match='ntaps must be less than nfreqs'): + firwin2(50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33) + # Decreasing value in `freq` + with assert_raises(ValueError, match='must be nondecreasing'): + firwin2(50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0]) + # Value in `freq` repeated more than once. + with assert_raises(ValueError, match='must not occur more than twice'): + firwin2(50, [0, .1, .1, .1, 1.0], [0.0, 0.5, 0.75, 1.0, 1.0]) + # `freq` does not start at 0.0. + with assert_raises(ValueError, match='start with 0'): + firwin2(50, [0.5, 1.0], [0.0, 1.0]) + # `freq` does not end at fs/2. + with assert_raises(ValueError, match='end with fs/2'): + firwin2(50, [0.0, 0.5], [0.0, 1.0]) + # Value 0 is repeated in `freq` + with assert_raises(ValueError, match='0 must not be repeated'): + firwin2(50, [0.0, 0.0, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0]) + # Value fs/2 is repeated in `freq` + with assert_raises(ValueError, match='fs/2 must not be repeated'): + firwin2(50, [0.0, 0.5, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0]) + # Value in `freq` that is too close to a repeated number + with assert_raises(ValueError, match='cannot contain numbers ' + 'that are too close'): + firwin2(50, [0.0, 0.5 - np.finfo(float).eps * 0.5, 0.5, 0.5, 1.0], + [1.0, 1.0, 1.0, 0.0, 0.0]) + + # Type II filter, but the gain at nyquist frequency is not zero. + with assert_raises(ValueError, match='Type II filter'): + firwin2(16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0]) + + # Type III filter, but the gains at nyquist and zero rate are not zero. + with assert_raises(ValueError, match='Type III filter'): + firwin2(17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True) + with assert_raises(ValueError, match='Type III filter'): + firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True) + with assert_raises(ValueError, match='Type III filter'): + firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0], antisymmetric=True) + + # Type IV filter, but the gain at zero rate is not zero. + with assert_raises(ValueError, match='Type IV filter'): + firwin2(16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True) + + def test01(self): + width = 0.04 + beta = 12.0 + ntaps = 400 + # Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w + # increases from w=0.5 to w=1 (w=1 is the Nyquist frequency). + freq = [0.0, 0.5, 1.0] + gain = [1.0, 1.0, 0.0] + taps = firwin2(ntaps, freq, gain, window=('kaiser', beta)) + freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, + 0.75, 1.0-width/2]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5) + + def test02(self): + width = 0.04 + beta = 12.0 + # ntaps must be odd for positive gain at Nyquist. + ntaps = 401 + # An ideal highpass filter. 
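+ # Repeating 0.5 in `freq` encodes a step discontinuity in the desired + # response (gain 0 just below 0.5, gain 1 just above it); firwin2 lets a + # frequency appear at most twice for this purpose. Hedged sketch of the + # complementary ideal-lowpass specification (cf. test04 below): + # + # firwin2(5, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])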
+ freq = [0.0, 0.5, 0.5, 1.0] + gain = [0.0, 0.0, 1.0, 1.0] + taps = firwin2(ntaps, freq, gain, window=('kaiser', beta)) + freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) + + def test03(self): + width = 0.02 + ntaps, beta = kaiserord(120, width) + # ntaps must be odd for positive gain at Nyquist. + ntaps = int(ntaps) | 1 + freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0] + gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0] + taps = firwin2(ntaps, freq, gain, window=('kaiser', beta)) + freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45, + 0.5-width, 0.5+width, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) + + def test04(self): + """Test firwin2 when window=None.""" + ntaps = 5 + # Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0] + freq = [0.0, 0.5, 0.5, 1.0] + gain = [1.0, 1.0, 0.0, 0.0] + taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193) + alpha = 0.5 * (ntaps - 1) + m = np.arange(0, ntaps) - alpha + h = 0.5 * sinc(0.5 * m) + assert_array_almost_equal(h, taps) + + def test05(self): + """Test firwin2 for calculating Type IV filters""" + ntaps = 1500 + + freq = [0.0, 1.0] + gain = [0.0, 1.0] + taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True) + assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1]) + + freqs, response = freqz(taps, worN=2048) + assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4) + + def test06(self): + """Test firwin2 for calculating Type III filters""" + ntaps = 1501 + + freq = [0.0, 0.5, 0.55, 1.0] + gain = [0.0, 0.5, 0.0, 0.0] + taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True) + assert taps[ntaps // 2] == 0.0 + assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1]) + + freqs, response1 = freqz(taps, worN=2048) + response2 = np.interp(freqs / np.pi, freq, gain) + assert_array_almost_equal(abs(response1), response2, decimal=3) + + def test_fs_nyq(self): + taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) + taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0) + assert_array_almost_equal(taps1, taps2) + + def test_tuple(self): + taps1 = firwin2(150, (0.0, 0.5, 0.5, 1.0), (1.0, 1.0, 0.0, 0.0)) + taps2 = firwin2(150, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0]) + assert_array_almost_equal(taps1, taps2) + + def test_input_modyfication(self): + freq1 = np.array([0.0, 0.5, 0.5, 1.0]) + freq2 = np.array(freq1) + firwin2(80, freq1, [1.0, 1.0, 0.0, 0.0]) + xp_assert_equal(freq1, freq2) + + +class TestRemez: + + def test_bad_args(self): + assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka') + + def test_hilbert(self): + N = 11 # number of taps in the filter + a = 0.1 # width of the transition band + + # design an unity gain hilbert bandpass filter from w to 0.5-w + h = remez(11, [a, 0.5-a], [1], type='hilbert') + + # make sure the filter has correct # of taps + assert len(h) == N, "Number of Taps" + + # make sure it is type III (anti-symmetric tap coefficients) + assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1]) + + # Since the requested response is symmetric, all even coefficients + # should be zero (or in this case really small) + assert (abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero" + + # now check the frequency response + w, H = freqz(h, 1) + f = 
w/2/np.pi + Hmag = abs(H) + + # should have a zero at 0 and pi (in this case close to zero) + assert (Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi" + + # check that the pass band is close to unity + idx = np.logical_and(f > a, f < 0.5-a) + assert (abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity" + + def test_compare(self): + # test comparison to MATLAB + k = [0.024590270518440, -0.041314581814658, -0.075943803756711, + -0.003530911231040, 0.193140296954975, 0.373400753484939, + 0.373400753484939, 0.193140296954975, -0.003530911231040, + -0.075943803756711, -0.041314581814658, 0.024590270518440] + h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.) + xp_assert_close(h, k) + + h = [-0.038976016082299, 0.018704846485491, -0.014644062687875, + 0.002879152556419, 0.016849978528150, -0.043276706138248, + 0.073641298245579, -0.103908158578635, 0.129770906801075, + -0.147163447297124, 0.153302248456347, -0.147163447297124, + 0.129770906801075, -0.103908158578635, 0.073641298245579, + -0.043276706138248, 0.016849978528150, 0.002879152556419, + -0.014644062687875, 0.018704846485491, -0.038976016082299] + xp_assert_close(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + remez(11, .1, 1, fs=np.array([10, 20])) + +class TestFirls: + + def test_bad_args(self): + # even numtaps + assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0]) + # odd bands + assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0]) + # len(bands) != len(desired) + assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0]) + # non-monotonic bands + assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0]) + assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4) + assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4) + assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4) + # negative desired + assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1]) + # len(weight) != len(pairs) + assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], weight=[1, 2]) + # negative weight + assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], weight=[-1]) + + def test_firls(self): + N = 11 # number of taps in the filter + a = 0.1 # width of the transition band + + # design a halfband symmetric low-pass filter + h = firls(11, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0) + + # make sure the filter has correct # of taps + assert h.shape[0] == N + + # make sure it is symmetric + midx = (N-1) // 2 + assert_array_almost_equal(h[:midx], h[:-midx-1:-1]) + + # make sure the center tap is 0.5 + assert_almost_equal(h[midx], 0.5) + + # For halfband symmetric, odd coefficients (except the center) + # should be zero (really small) + hodd = np.hstack((h[1:midx:2], h[-midx+1::2])) + assert_array_almost_equal(hodd, np.zeros_like(hodd)) + + # now check the frequency response + w, H = freqz(h, 1) + f = w/2/np.pi + Hmag = np.abs(H) + + # check that the pass band is close to unity + idx = np.logical_and(f > 0, f < a) + assert_array_almost_equal(Hmag[idx], np.ones_like(Hmag[idx]), decimal=3) + + # check that the stop band is close to zero + idx = np.logical_and(f > 0.5-a, f < 0.5) + assert_array_almost_equal(Hmag[idx], np.zeros_like(Hmag[idx]), decimal=3) + + def test_compare(self): + # compare to OCTAVE output + taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], weight=[1, 2]) + # >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]); + known_taps = [-6.26930101730182e-04, -1.03354450635036e-01, + 
-9.81576747564301e-03, 3.17271686090449e-01, + 5.11409425599933e-01, 3.17271686090449e-01, + -9.81576747564301e-03, -1.03354450635036e-01, + -6.26930101730182e-04] + xp_assert_close(taps, known_taps) + + # compare to MATLAB output + taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], weight=[1, 2]) + # >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]); + known_taps = [ + 0.058545300496815, -0.014233383714318, -0.104688258464392, + 0.012403323025279, 0.317930861136062, 0.488047220029700, + 0.317930861136062, 0.012403323025279, -0.104688258464392, + -0.014233383714318, 0.058545300496815] + xp_assert_close(taps, known_taps) + + # With linear changes: + taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20) + # >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0]) + known_taps = [ + 1.156090832768218, -4.1385894727395849, 7.5288619164321826, + -8.5530572592947856, 7.5288619164321826, -4.1385894727395849, + 1.156090832768218] + xp_assert_close(taps, known_taps) + + def test_rank_deficient(self): + # solve() runs but warns (only sometimes, so here we don't use match) + x = firls(21, [0, 0.1, 0.9, 1], [1, 1, 0, 0]) + w, h = freqz(x, fs=2.) + absh2 = np.abs(h[:2]) + xp_assert_close(absh2, np.ones_like(absh2), atol=1e-5) + absh2 = np.abs(h[-2:]) + xp_assert_close(absh2, np.zeros_like(absh2), atol=1e-6, rtol=1e-7) + # switch to pinvh (tolerances could be higher with longer + # filters, but using shorter ones is faster computationally and + # the idea is the same) + x = firls(101, [0, 0.01, 0.99, 1], [1, 1, 0, 0]) + w, h = freqz(x, fs=2.) + mask = w < 0.01 + assert mask.sum() > 3 + habs = np.abs(h[mask]) + xp_assert_close(habs, np.ones_like(habs), atol=1e-4) + mask = w > 0.99 + assert mask.sum() > 3 + habs = np.abs(h[mask]) + xp_assert_close(habs, np.zeros_like(habs), atol=1e-4) + + def test_fs_validation(self): + with pytest.raises(ValueError, match="Sampling.*single scalar"): + firls(11, .1, 1, fs=np.array([10, 20])) + +class TestMinimumPhase: + @pytest.mark.thread_unsafe + def test_bad_args(self): + # not enough taps + assert_raises(ValueError, minimum_phase, [1.]) + assert_raises(ValueError, minimum_phase, [1., 1.]) + assert_raises(ValueError, minimum_phase, np.full(10, 1j)) + assert_raises(ValueError, minimum_phase, 'foo') + assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8) + assert_raises(ValueError, minimum_phase, np.ones(10), method='foo') + assert_warns(RuntimeWarning, minimum_phase, np.arange(3)) + with pytest.raises(ValueError, match="is only supported when"): + minimum_phase(np.ones(3), method='hilbert', half=False) + + def test_homomorphic(self): + # check that it can recover frequency responses of arbitrary + # linear-phase filters + + # for some cases we can get the actual filter back + h = [1, -1] + h_new = minimum_phase(np.convolve(h, h[::-1])) + xp_assert_close(h_new, np.asarray(h, dtype=np.float64), rtol=0.05) + + # but in general we only guarantee we get the magnitude back + rng = np.random.RandomState(0) + for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101): + h = rng.randn(n) + h_linear = np.convolve(h, h[::-1]) + h_new = minimum_phase(h_linear) + xp_assert_close(np.abs(fft(h_new)), np.abs(fft(h)), rtol=1e-4) + h_new = minimum_phase(h_linear, half=False) + assert len(h_linear) == len(h_new) + xp_assert_close(np.abs(fft(h_new)), np.abs(fft(h_linear)), rtol=1e-4) + + def test_hilbert(self): + # compare to MATLAB output of reference implementation + + # f=[0 0.3 0.5 1]; + # a=[1 1 0 0]; + # h=remez(11,f,a); + h = remez(12, [0, 0.3, 0.5, 1], 
[1, 0], fs=2.) + k = [0.349585548646686, 0.373552164395447, 0.326082685363438, + 0.077152207480935, -0.129943946349364, -0.059355880509749] + m = minimum_phase(h, 'hilbert') + xp_assert_close(m, k, rtol=5e-3) + + # f=[0 0.8 0.9 1]; + # a=[0 0 1 1]; + # h=remez(20,f,a); + h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.) + k = [0.232486803906329, -0.133551833687071, 0.151871456867244, + -0.157957283165866, 0.151739294892963, -0.129293146705090, + 0.100787844523204, -0.065832656741252, 0.035361328741024, + -0.014977068692269, -0.158416139047557] + m = minimum_phase(h, 'hilbert', n_fft=2**19) + xp_assert_close(m, k, rtol=2e-3) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_ltisys.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_ltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..826b39cb0e066b3a6198bbcf1a293f6e0497076b --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_ltisys.py @@ -0,0 +1,1225 @@ +import warnings + +import numpy as np +from numpy.testing import suppress_warnings +import pytest +from pytest import raises as assert_raises +from scipy._lib._array_api import( + assert_almost_equal, xp_assert_equal, xp_assert_close +) + +from scipy.signal import (ss2tf, tf2ss, lti, + dlti, bode, freqresp, lsim, impulse, step, + abcd_normalize, place_poles, + TransferFunction, StateSpace, ZerosPolesGain) +from scipy.signal._filter_design import BadCoefficients +import scipy.linalg as linalg + + +def _assert_poles_close(P1,P2, rtol=1e-8, atol=1e-8): + """ + Check each pole in P1 is close to a pole in P2 with a 1e-8 + relative tolerance or 1e-8 absolute tolerance (useful for zero poles). + These tolerances are very strict but the systems tested are known to + accept these poles so we should not be far from what is requested. + """ + P2 = P2.copy() + for p1 in P1: + found = False + for p2_idx in range(P2.shape[0]): + if np.allclose([np.real(p1), np.imag(p1)], + [np.real(P2[p2_idx]), np.imag(P2[p2_idx])], + rtol, atol): + found = True + np.delete(P2, p2_idx) + break + if not found: + raise ValueError("Can't find pole " + str(p1) + " in " + str(P2)) + + +class TestPlacePoles: + + def _check(self, A, B, P, **kwargs): + """ + Perform the most common tests on the poles computed by place_poles + and return the Bunch object for further specific tests + """ + fsf = place_poles(A, B, P, **kwargs) + expected, _ = np.linalg.eig(A - np.dot(B, fsf.gain_matrix)) + _assert_poles_close(expected, fsf.requested_poles) + _assert_poles_close(expected, fsf.computed_poles) + _assert_poles_close(P,fsf.requested_poles) + return fsf + + def test_real(self): + # Test real pole placement using KNV and YT0 algorithm and example 1 in + # section 4 of the reference publication (see place_poles docstring) + A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, + 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, + 1.343, -2.104]).reshape(4, 4) + B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146,0]).reshape(4, 2) + P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + + # Check that both KNV and YT compute correct K matrix + self._check(A, B, P, method='KNV0') + self._check(A, B, P, method='YT') + + # Try to reach the specific case in _YT_real where two singular + # values are almost equal. 
This is to improve code coverage but I + # have no way to be sure this code is really reached + + # on some architectures this can lead to a RuntimeWarning invalid + # value in divide (see gh-7590), so suppress it for now + with np.errstate(invalid='ignore'): + self._check(A, B, (2,2,3,3)) + + def test_complex(self): + # Test complex pole placement on a linearized car model, taken from L. + # Jaulin, Automatique pour la robotique, Cours et Exercices, iSTE + # editions p 184/185 + A = np.array([[0, 7, 0, 0], + [0, 0, 0, 7/3.], + [0, 0, 0, 0], + [0, 0, 0, 0]]) + B = np.array([[0, 0], + [0, 0], + [1, 0], + [0, 1]]) + # Test complex poles on YT + P = np.array([-3, -1, -2-1j, -2+1j]) + # on macOS arm64 this can lead to a RuntimeWarning invalid + # value in divide, so suppress it for now + with np.errstate(divide='ignore', invalid='ignore'): + self._check(A, B, P) + + # Try to reach the specific case in _YT_complex where two singular + # values are almost equal. This is to improve code coverage but I + # have no way to be sure this code is really reached + + P = [0-1e-6j,0+1e-6j,-10,10] + with np.errstate(divide='ignore', invalid='ignore'): + self._check(A, B, P, maxiter=1000) + + # Try to reach the specific case in _YT_complex where the rank two + # update yields two null vectors. This test was found via Monte Carlo. + + A = np.array( + [-2148,-2902, -2267, -598, -1722, -1829, -165, -283, -2546, + -167, -754, -2285, -543, -1700, -584, -2978, -925, -1300, + -1583, -984, -386, -2650, -764, -897, -517, -1598, 2, -1709, + -291, -338, -153, -1804, -1106, -1168, -867, -2297] + ).reshape(6,6) + + B = np.array( + [-108, -374, -524, -1285, -1232, -161, -1204, -672, -637, + -15, -483, -23, -931, -780, -1245, -1129, -1290, -1502, + -952, -1374, -62, -964, -930, -939, -792, -756, -1437, + -491, -1543, -686] + ).reshape(6,5) + P = [-25.-29.j, -25.+29.j, 31.-42.j, 31.+42.j, 33.-41.j, 33.+41.j] + self._check(A, B, P) + + # Use a lot of poles to go through all cases for update_order + # in _YT_loop + + big_A = np.ones((11,11))-np.eye(11) + big_B = np.ones((11,10))-np.diag([1]*10,1)[:,1:] + big_A[:6,:6] = A + big_B[:6,:5] = B + + P = [-10,-20,-30,40,50,60,70,-20-5j,-20+5j,5+3j,5-3j] + with np.errstate(divide='ignore', invalid='ignore'): + self._check(big_A, big_B, P) + + #check with only complex poles and only real poles + P = [-10,-20,-30,-40,-50,-60,-70,-80,-90,-100] + self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) + P = [-10+10j,-20+20j,-30+30j,-40+40j,-50+50j, + -10-10j,-20-20j,-30-30j,-40-40j,-50-50j] + self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) + + # need a 5x5 array to ensure YT handles properly when there + # is only one real pole and several complex + A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0, + 0,0,0,5,0,0,0,0,9]).reshape(5,5) + B = np.array([0,0,0,0,1,0,0,1,2,3]).reshape(5,2) + P = np.array([-2, -3+1j, -3-1j, -1+1j, -1-1j]) + with np.errstate(divide='ignore', invalid='ignore'): + place_poles(A, B, P) + + # same test with an odd number of real poles > 1 + # this is another specific case of YT + P = np.array([-2, -3, -4, -1+1j, -1-1j]) + with np.errstate(divide='ignore', invalid='ignore'): + self._check(A, B, P) + + def test_tricky_B(self): + # check we handle as we should the 1 column B matrices and + # n column B matrices (with n such as shape(A)=(n, n)) + A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, + 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, + 1.343, -2.104]).reshape(4, 4) + B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146, 0, 1, 2, 3, 4, + 5, 6, 7, 
8]).reshape(4, 4) + + # KNV or YT are not called here, it's a specific case with only + # one unique solution + P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + fsf = self._check(A, B, P) + # rtol and nb_iter should be set to np.nan as the identity can be + # used as transfer matrix + assert np.isnan(fsf.rtol) + assert np.isnan(fsf.nb_iter) + + # check with complex poles too as they trigger a specific case in + # the specific case :-) + P = np.array((-2+1j,-2-1j,-3,-2)) + fsf = self._check(A, B, P) + assert np.isnan(fsf.rtol) + assert np.isnan(fsf.nb_iter) + + #now test with a B matrix with only one column (no optimisation) + B = B[:,0].reshape(4,1) + P = np.array((-2+1j,-2-1j,-3,-2)) + fsf = self._check(A, B, P) + + # we can't optimize anything, check they are set to 0 as expected + assert fsf.rtol == 0 + assert fsf.nb_iter == 0 + + @pytest.mark.thread_unsafe + def test_errors(self): + # Test input mistakes from user + A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4) + B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2) + + #should fail as the method keyword is invalid + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), + method="foo") + + #should fail as poles are not 1D array + assert_raises(ValueError, place_poles, A, B, + np.array((-2.1,-2.2,-2.3,-2.4)).reshape(4,1)) + + #should fail as A is not a 2D array + assert_raises(ValueError, place_poles, A[:,:,np.newaxis], B, + (-2.1,-2.2,-2.3,-2.4)) + + #should fail as B is not a 2D array + assert_raises(ValueError, place_poles, A, B[:,:,np.newaxis], + (-2.1,-2.2,-2.3,-2.4)) + + #should fail as there are too many poles + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4,-3)) + + #should fail as there are not enough poles + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3)) + + #should fail as the rtol is greater than 1 + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), + rtol=42) + + #should fail as maxiter is smaller than 1 + assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), + maxiter=-42) + + # should fail as ndim(B) is two + assert_raises(ValueError, place_poles, A, B, (-2,-2,-2,-2)) + + # uncontrollable system + assert_raises(ValueError, place_poles, np.ones((4,4)), + np.ones((4,2)), (1,2,3,4)) + + # Should not raise ValueError as the poles can be placed but should + # raise a warning as the convergence is not reached + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + fsf = place_poles(A, B, (-1,-2,-3,-4), rtol=1e-16, maxiter=42) + assert len(w) == 1 + assert issubclass(w[-1].category, UserWarning) + assert ("Convergence was not reached after maxiter iterations" + in str(w[-1].message)) + assert fsf.nb_iter == 42 + + # should fail as a complex misses its conjugate + assert_raises(ValueError, place_poles, A, B, (-2+1j,-2-1j,-2+3j,-2)) + + # should fail as A is not square + assert_raises(ValueError, place_poles, A[:,:3], B, (-2,-3,-4,-5)) + + # should fail as B has not the same number of lines as A + assert_raises(ValueError, place_poles, A, B[:3,:], (-2,-3,-4,-5)) + + # should fail as KNV0 does not support complex poles + assert_raises(ValueError, place_poles, A, B, + (-2+1j,-2-1j,-2+3j,-2-3j), method="KNV0") + + +class TestSS2TF: + + def check_matrix_shapes(self, p, q, r): + ss2tf(np.zeros((p, p)), + np.zeros((p, q)), + np.zeros((r, p)), + np.zeros((r, q)), 0) + + def test_shapes(self): + # Each tuple holds: + # number of states, number of inputs, number of outputs + for p, q, r in [(3, 3, 3), (1, 3, 3), (1, 1, 1)]: 
+ self.check_matrix_shapes(p, q, r) + + def test_basic(self): + # Test a round trip through tf2ss and ss2tf. + b = np.array([1.0, 3.0, 5.0]) + a = np.array([1.0, 2.0, 3.0]) + + A, B, C, D = tf2ss(b, a) + xp_assert_close(A, [[-2., -3], [1, 0]], rtol=1e-13) + xp_assert_close(B, [[1.], [0]], rtol=1e-13) + xp_assert_close(C, [[1., 2]], rtol=1e-13) + xp_assert_close(D, [[1.]], rtol=1e-14) + + bb, aa = ss2tf(A, B, C, D) + xp_assert_close(bb[0], b, rtol=1e-13) + xp_assert_close(aa, a, rtol=1e-13) + + def test_zero_order_round_trip(self): + # See gh-5760 + tf = (2, 1) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[0.]], rtol=1e-13) + xp_assert_close(B, [[0.]], rtol=1e-13) + xp_assert_close(C, [[0.]], rtol=1e-13) + xp_assert_close(D, [[2.]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[2., 0]], rtol=1e-13) + xp_assert_close(den, [1., 0], rtol=1e-13) + + tf = ([[5], [2]], 1) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[0.]], rtol=1e-13) + xp_assert_close(B, [[0.]], rtol=1e-13) + xp_assert_close(C, [[0.], [0]], rtol=1e-13) + xp_assert_close(D, [[5.], [2]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[5., 0], [2, 0]], rtol=1e-13) + xp_assert_close(den, [1., 0], rtol=1e-13) + + def test_simo_round_trip(self): + # See gh-5753 + tf = ([[1, 2], [1, 1]], [1, 2]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-2.]], rtol=1e-13) + xp_assert_close(B, [[1.]], rtol=1e-13) + xp_assert_close(C, [[0.], [-1.]], rtol=1e-13) + xp_assert_close(D, [[1.], [1.]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[1., 2.], [1., 1.]], rtol=1e-13) + xp_assert_close(den, [1., 2.], rtol=1e-13) + + tf = ([[1, 0, 1], [1, 1, 1]], [1, 1, 1]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-1., -1.], [1., 0.]], rtol=1e-13) + xp_assert_close(B, [[1.], [0.]], rtol=1e-13) + xp_assert_close(C, [[-1., 0.], [0., 0.]], rtol=1e-13) + xp_assert_close(D, [[1.], [1.]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[1., 0., 1.], [1., 1., 1.]], rtol=1e-13) + xp_assert_close(den, [1., 1., 1.], rtol=1e-13) + + tf = ([[1, 2, 3], [1, 2, 3]], [1, 2, 3, 4]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-2., -3, -4], [1, 0, 0], [0, 1, 0]], rtol=1e-13) + xp_assert_close(B, [[1.], [0], [0]], rtol=1e-13) + xp_assert_close(C, [[1., 2, 3], [1, 2, 3]], rtol=1e-13) + xp_assert_close(D, [[0.], [0]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[0., 1, 2, 3], [0, 1, 2, 3]], rtol=1e-13) + xp_assert_close(den, [1., 2, 3, 4], rtol=1e-13) + + tf = (np.array([1, [2, 3]], dtype=object), [1, 6]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-6.]], rtol=1e-31) + xp_assert_close(B, [[1.]], rtol=1e-31) + xp_assert_close(C, [[1.], [-9]], rtol=1e-31) + xp_assert_close(D, [[0.], [2]], rtol=1e-31) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[0., 1], [2, 3]], rtol=1e-13) + xp_assert_close(den, [1., 6], rtol=1e-13) + + tf = (np.array([[1, -3], [1, 2, 3]], dtype=object), [1, 6, 5]) + A, B, C, D = tf2ss(*tf) + xp_assert_close(A, [[-6., -5], [1, 0]], rtol=1e-13) + xp_assert_close(B, [[1.], [0]], rtol=1e-13) + xp_assert_close(C, [[1., -3], [-4, -2]], rtol=1e-13) + xp_assert_close(D, [[0.], [1]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[0., 1, -3], [1, 2, 3]], rtol=1e-13) + xp_assert_close(den, [1., 6, 5], rtol=1e-13) + + def test_all_int_arrays(self): + A = [[0, 1, 0], [0, 0, 1], [-3, -4, -2]] + B = [[0], [0], [1]] + C = [[5, 1, 0]] + D = [[0]] + num, den = ss2tf(A, B, C, D) + xp_assert_close(num, [[0.0, 0.0, 
1.0, 5.0]], rtol=1e-13, atol=1e-14) + xp_assert_close(den, [1.0, 2.0, 4.0, 3.0], rtol=1e-13) + + def test_multioutput(self): + # Regression test for gh-2669. + + # 4 states + A = np.array([[-1.0, 0.0, 1.0, 0.0], + [-1.0, 0.0, 2.0, 0.0], + [-4.0, 0.0, 3.0, 0.0], + [-8.0, 8.0, 0.0, 4.0]]) + + # 1 input + B = np.array([[0.3], + [0.0], + [7.0], + [0.0]]) + + # 3 outputs + C = np.array([[0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [8.0, 8.0, 0.0, 0.0]]) + + D = np.array([[0.0], + [0.0], + [1.0]]) + + # Get the transfer functions for all the outputs in one call. + b_all, a = ss2tf(A, B, C, D) + + # Get the transfer functions for each output separately. + b0, a0 = ss2tf(A, B, C[0], D[0]) + b1, a1 = ss2tf(A, B, C[1], D[1]) + b2, a2 = ss2tf(A, B, C[2], D[2]) + + # Check that we got the same results. + xp_assert_close(a0, a, rtol=1e-13) + xp_assert_close(a1, a, rtol=1e-13) + xp_assert_close(a2, a, rtol=1e-13) + xp_assert_close(b_all, np.vstack((b0, b1, b2)), rtol=1e-13, atol=1e-14) + + +class TestLsim: + digits_accuracy = 7 + + def lti_nowarn(self, *args): + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(*args) + return system + + def test_first_order(self): + # y' = -y + # exact solution is y(t) = exp(-t) + system = self.lti_nowarn(-1.,1.,1.,0.) + t = np.linspace(0,5) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[1.0]) + expected_x = np.exp(-tout) + assert_almost_equal(x, expected_x) + assert_almost_equal(y, expected_x) + + def test_second_order(self): + t = np.linspace(0, 10, 1001) + u = np.zeros_like(t) + # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = 0. + # With initial conditions x(0)=1.0 and x'(t)=0.0, the exact solution + # is (1-t)*exp(-t). + system = self.lti_nowarn([1.0], [1.0, 2.0, 1.0]) + tout, y, x = lsim(system, u, t, X0=[1.0, 0.0]) + expected_x = (1.0 - tout) * np.exp(-tout) + assert_almost_equal(x[:, 0], expected_x) + + def test_integrator(self): + # integrator: y' = u + system = self.lti_nowarn(0., 1., 1., 0.) + t = np.linspace(0,5) + u = t + tout, y, x = lsim(system, u, t) + expected_x = 0.5 * tout**2 + assert_almost_equal(x, expected_x, decimal=self.digits_accuracy) + assert_almost_equal(y, expected_x, decimal=self.digits_accuracy) + + def test_two_states(self): + # A system with two state variables, two inputs, and one output. + A = np.array([[-1.0, 0.0], [0.0, -2.0]]) + B = np.array([[1.0, 0.0], [0.0, 1.0]]) + C = np.array([1.0, 0.0]) + D = np.zeros((1, 2)) + + system = self.lti_nowarn(A, B, C, D) + + t = np.linspace(0, 10.0, 21) + u = np.zeros((len(t), 2)) + tout, y, x = lsim(system, U=u, T=t, X0=[1.0, 1.0]) + expected_y = np.exp(-tout) + expected_x0 = np.exp(-tout) + expected_x1 = np.exp(-2.0 * tout) + assert_almost_equal(y, expected_y) + assert_almost_equal(x[:, 0], expected_x0) + assert_almost_equal(x[:, 1], expected_x1) + + def test_double_integrator(self): + # double integrator: y'' = 2u + A = np.array([[0., 1.], [0., 0.]]) + B = np.array([[0.], [1.]]) + C = np.array([[2., 0.]]) + system = self.lti_nowarn(A, B, C, 0.) 
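+        # With a unit-step input u(t) = 1, the states integrate to x2(t) = t and
+        # x1(t) = t**2 / 2, so the output is y(t) = 2*x1(t) = t**2; the expected
+        # arrays below encode exactly this closed-form solution.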
+ t = np.linspace(0,5) + u = np.ones_like(t) + tout, y, x = lsim(system, u, t) + expected_x = np.transpose(np.array([0.5 * tout**2, tout])) + expected_y = tout**2 + assert_almost_equal(x, expected_x, decimal=self.digits_accuracy) + assert_almost_equal(y, expected_y, decimal=self.digits_accuracy) + + def test_jordan_block(self): + # Non-diagonalizable A matrix + # x1' + x1 = x2 + # x2' + x2 = u + # y = x1 + # Exact solution with u = 0 is y(t) = t exp(-t) + A = np.array([[-1., 1.], [0., -1.]]) + B = np.array([[0.], [1.]]) + C = np.array([[1., 0.]]) + system = self.lti_nowarn(A, B, C, 0.) + t = np.linspace(0,5) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[0.0, 1.0]) + expected_y = tout * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_miso(self): + # A system with two state variables, two inputs, and one output. + A = np.array([[-1.0, 0.0], [0.0, -2.0]]) + B = np.array([[1.0, 0.0], [0.0, 1.0]]) + C = np.array([1.0, 0.0]) + D = np.zeros((1,2)) + system = self.lti_nowarn(A, B, C, D) + + t = np.linspace(0, 5.0, 101) + u = np.zeros((len(t), 2)) + tout, y, x = lsim(system, u, t, X0=[1.0, 1.0]) + expected_y = np.exp(-tout) + expected_x0 = np.exp(-tout) + expected_x1 = np.exp(-2.0*tout) + assert_almost_equal(y, expected_y) + assert_almost_equal(x[:,0], expected_x0) + assert_almost_equal(x[:,1], expected_x1) + + def test_nonzero_initial_time(self): + system = self.lti_nowarn(-1.,1.,1.,0.) + t = np.linspace(1,2) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[1.0]) + expected_y = np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_nonequal_timesteps(self): + t = np.array([0.0, 1.0, 1.0, 3.0]) + u = np.array([0.0, 0.0, 1.0, 1.0]) + # Simple integrator: x'(t) = u(t) + system = ([1.0], [1.0, 0.0]) + with assert_raises(ValueError, + match="Time steps are not equally spaced."): + tout, y, x = lsim(system, u, t, X0=[1.0]) + + +class TestImpulse: + def test_first_order(self): + # First order system: x'(t) + x(t) = u(t) + # Exact impulse response is x(t) = exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = impulse(system) + expected_y = np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_fixed_time(self): + # Specify the desired time values for the output. + + # First order system: x'(t) + x(t) = u(t) + # Exact impulse response is x(t) = exp(-t). + system = ([1.0], [1.0,1.0]) + n = 21 + t = np.linspace(0, 2.0, n) + tout, y = impulse(system, T=t) + assert tout.shape == (n,) + assert_almost_equal(tout, t) + expected_y = np.exp(-t) + assert_almost_equal(y, expected_y) + + def test_first_order_initial(self): + # Specify an initial condition as a scalar. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact impulse response is x(t) = 4*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = impulse(system, X0=3.0) + expected_y = 4.0 * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_initial_list(self): + # Specify an initial condition as a list. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact impulse response is x(t) = 4*exp(-t). 
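+        # The unit impulse instantaneously adds B = 1 to the state, so the
+        # response decays from x(0+) = 3 + 1 = 4, i.e. y(t) = 4*exp(-t).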
+ system = ([1.0], [1.0,1.0]) + tout, y = impulse(system, X0=[3.0]) + expected_y = 4.0 * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_integrator(self): + # Simple integrator: x'(t) = u(t) + system = ([1.0], [1.0,0.0]) + tout, y = impulse(system) + expected_y = np.ones_like(tout) + assert_almost_equal(y, expected_y) + + def test_second_order(self): + # Second order system with a repeated root: + # x''(t) + 2*x(t) + x(t) = u(t) + # The exact impulse response is t*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = impulse(system) + expected_y = tout * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_array_like(self): + # Test that function can accept sequences, scalars. + system = ([1.0], [1.0, 2.0, 1.0]) + # TODO: add meaningful test where X0 is a list + tout, y = impulse(system, X0=[3], T=[5, 6]) + tout, y = impulse(system, X0=[3], T=[5]) + + def test_array_like2(self): + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = impulse(system, X0=3, T=5) + + +class TestStep: + def test_first_order(self): + # First order system: x'(t) + x(t) = u(t) + # Exact step response is x(t) = 1 - exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = step(system) + expected_y = 1.0 - np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_fixed_time(self): + # Specify the desired time values for the output. + + # First order system: x'(t) + x(t) = u(t) + # Exact step response is x(t) = 1 - exp(-t). + system = ([1.0], [1.0,1.0]) + n = 21 + t = np.linspace(0, 2.0, n) + tout, y = step(system, T=t) + assert tout.shape == (n,) + assert_almost_equal(tout, t) + expected_y = 1 - np.exp(-t) + assert_almost_equal(y, expected_y) + + def test_first_order_initial(self): + # Specify an initial condition as a scalar. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact step response is x(t) = 1 + 2*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = step(system, X0=3.0) + expected_y = 1 + 2.0*np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_first_order_initial_list(self): + # Specify an initial condition as a list. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact step response is x(t) = 1 + 2*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = step(system, X0=[3.0]) + expected_y = 1 + 2.0*np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_integrator(self): + # Simple integrator: x'(t) = u(t) + # Exact step response is x(t) = t. + system = ([1.0],[1.0,0.0]) + tout, y = step(system) + expected_y = tout + assert_almost_equal(y, expected_y) + + def test_second_order(self): + # Second order system with a repeated root: + # x''(t) + 2*x(t) + x(t) = u(t) + # The exact step response is 1 - (1 + t)*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = step(system) + expected_y = 1 - (1 + tout) * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_array_like(self): + # Test that function can accept sequences, scalars. + system = ([1.0], [1.0, 2.0, 1.0]) + # TODO: add meaningful test where X0 is a list + tout, y = step(system, T=[5, 6]) + + def test_complex_input(self): + # Test that complex input doesn't raise an error. + # `step` doesn't seem to have been designed for complex input, but this + # works and may be used, so add regression test. See gh-2654. + step(([], [-1], 1+0j)) + + +class TestLti: + def test_lti_instantiation(self): + # Test that lti can be instantiated with sequences, scalars. + # See PR-225. 
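+        # `lti` dispatches on the number of positional arguments: two arguments
+        # yield a TransferFunction, three a ZerosPolesGain, and four a StateSpace.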
+ + # TransferFunction + s = lti([1], [-1]) + assert isinstance(s, TransferFunction) + assert isinstance(s, lti) + assert not isinstance(s, dlti) + assert s.dt is None + + # ZerosPolesGain + s = lti(np.array([]), np.array([-1]), 1) + assert isinstance(s, ZerosPolesGain) + assert isinstance(s, lti) + assert not isinstance(s, dlti) + assert s.dt is None + + # StateSpace + s = lti([], [-1], 1) + s = lti([1], [-1], 1, 3) + assert isinstance(s, StateSpace) + assert isinstance(s, lti) + assert not isinstance(s, dlti) + assert s.dt is None + + +class TestStateSpace: + def test_initialization(self): + # Check that all initializations work + StateSpace(1, 1, 1, 1) + StateSpace([1], [2], [3], [4]) + StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]), + np.array([[1, 0]]), np.array([[0]])) + + def test_conversion(self): + # Check the conversion functions + s = StateSpace(1, 2, 3, 4) + assert isinstance(s.to_ss(), StateSpace) + assert isinstance(s.to_tf(), TransferFunction) + assert isinstance(s.to_zpk(), ZerosPolesGain) + + # Make sure copies work + assert StateSpace(s) is not s + assert s.to_ss() is not s + + def test_properties(self): + # Test setters/getters for cross class properties. + # This implicitly tests to_tf() and to_zpk() + + # Getters + s = StateSpace(1, 1, 1, 1) + xp_assert_equal(s.poles, [1.]) + xp_assert_equal(s.zeros, [0.]) + assert s.dt is None + + def test_operators(self): + # Test +/-/* operators on systems + + class BadType: + pass + + s1 = StateSpace(np.array([[-0.5, 0.7], [0.3, -0.8]]), + np.array([[1], [0]]), + np.array([[1, 0]]), + np.array([[0]]), + ) + + s2 = StateSpace(np.array([[-0.2, -0.1], [0.4, -0.1]]), + np.array([[1], [0]]), + np.array([[1, 0]]), + np.array([[0]]) + ) + + s_discrete = s1.to_discrete(0.1) + s2_discrete = s2.to_discrete(0.2) + s3_discrete = s2.to_discrete(0.1) + + # Impulse response + t = np.linspace(0, 1, 100) + u = np.zeros_like(t) + u[0] = 1 + + # Test multiplication + for typ in (int, float, complex, np.float32, np.complex128, np.array): + xp_assert_close(lsim(typ(2) * s1, U=u, T=t)[1], + typ(2) * lsim(s1, U=u, T=t)[1]) + + xp_assert_close(lsim(s1 * typ(2), U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] * typ(2)) + + xp_assert_close(lsim(s1 / typ(2), U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] / typ(2)) + + with assert_raises(TypeError): + typ(2) / s1 + + xp_assert_close(lsim(s1 * 2, U=u, T=t)[1], + lsim(s1, U=2 * u, T=t)[1]) + + xp_assert_close(lsim(s1 * s2, U=u, T=t)[1], + lsim(s1, U=lsim(s2, U=u, T=t)[1], T=t)[1], + atol=1e-5) + + with assert_raises(TypeError): + s1 / s1 + + with assert_raises(TypeError): + s1 * s_discrete + + with assert_raises(TypeError): + # Check different discretization constants + s_discrete * s2_discrete + + with assert_raises(TypeError): + s1 * BadType() + + with assert_raises(TypeError): + BadType() * s1 + + with assert_raises(TypeError): + s1 / BadType() + + with assert_raises(TypeError): + BadType() / s1 + + # Test addition + xp_assert_close(lsim(s1 + 2, U=u, T=t)[1], + 2 * u + lsim(s1, U=u, T=t)[1]) + + # Check for dimension mismatch + with assert_raises(ValueError): + s1 + np.array([1, 2]) + + with assert_raises(ValueError): + np.array([1, 2]) + s1 + + with assert_raises(TypeError): + s1 + s_discrete + + with assert_raises(ValueError): + s1 / np.array([[1, 2], [3, 4]]) + + with assert_raises(TypeError): + # Check different discretization constants + s_discrete + s2_discrete + + with assert_raises(TypeError): + s1 + BadType() + + with assert_raises(TypeError): + BadType() + s1 + + xp_assert_close(lsim(s1 + s2, U=u, 
T=t)[1], + lsim(s1, U=u, T=t)[1] + lsim(s2, U=u, T=t)[1]) + + # Test subtraction + xp_assert_close(lsim(s1 - 2, U=u, T=t)[1], + -2 * u + lsim(s1, U=u, T=t)[1]) + + xp_assert_close(lsim(2 - s1, U=u, T=t)[1], + 2 * u + lsim(-s1, U=u, T=t)[1]) + + xp_assert_close(lsim(s1 - s2, U=u, T=t)[1], + lsim(s1, U=u, T=t)[1] - lsim(s2, U=u, T=t)[1]) + + with assert_raises(TypeError): + s1 - BadType() + + with assert_raises(TypeError): + BadType() - s1 + + s = s_discrete + s3_discrete + assert s.dt == 0.1 + + s = s_discrete * s3_discrete + assert s.dt == 0.1 + + s = 3 * s_discrete + assert s.dt == 0.1 + + s = -s_discrete + assert s.dt == 0.1 + +class TestTransferFunction: + def test_initialization(self): + # Check that all initializations work + TransferFunction(1, 1) + TransferFunction([1], [2]) + TransferFunction(np.array([1]), np.array([2])) + + def test_conversion(self): + # Check the conversion functions + s = TransferFunction([1, 0], [1, -1]) + assert isinstance(s.to_ss(), StateSpace) + assert isinstance(s.to_tf(), TransferFunction) + assert isinstance(s.to_zpk(), ZerosPolesGain) + + # Make sure copies work + assert TransferFunction(s) is not s + assert s.to_tf() is not s + + def test_properties(self): + # Test setters/getters for cross class properties. + # This implicitly tests to_ss() and to_zpk() + + # Getters + s = TransferFunction([1, 0], [1, -1]) + xp_assert_equal(s.poles, [1.]) + xp_assert_equal(s.zeros, [0.]) + + +class TestZerosPolesGain: + def test_initialization(self): + # Check that all initializations work + ZerosPolesGain(1, 1, 1) + ZerosPolesGain([1], [2], 1) + ZerosPolesGain(np.array([1]), np.array([2]), 1) + + def test_conversion(self): + #Check the conversion functions + s = ZerosPolesGain(1, 2, 3) + assert isinstance(s.to_ss(), StateSpace) + assert isinstance(s.to_tf(), TransferFunction) + assert isinstance(s.to_zpk(), ZerosPolesGain) + + # Make sure copies work + assert ZerosPolesGain(s) is not s + assert s.to_zpk() is not s + + +class Test_abcd_normalize: + def setup_method(self): + self.A = np.array([[1.0, 2.0], [3.0, 4.0]]) + self.B = np.array([[-1.0], [5.0]]) + self.C = np.array([[4.0, 5.0]]) + self.D = np.array([[2.5]]) + + def test_no_matrix_fails(self): + assert_raises(ValueError, abcd_normalize) + + def test_A_nosquare_fails(self): + assert_raises(ValueError, abcd_normalize, [1, -1], + self.B, self.C, self.D) + + def test_AB_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], + self.C, self.D) + + def test_AC_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, self.B, + [[4.0], [5.0]], self.D) + + def test_CD_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, self.B, + self.C, [2.5, 0]) + + def test_BD_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], + self.C, self.D) + + def test_normalized_matrices_unchanged(self): + A, B, C, D = abcd_normalize(self.A, self.B, self.C, self.D) + xp_assert_equal(A, self.A) + xp_assert_equal(B, self.B) + xp_assert_equal(C, self.C) + xp_assert_equal(D, self.D) + + def test_shapes(self): + A, B, C, D = abcd_normalize(self.A, self.B, [1, 0], 0) + xp_assert_equal(A.shape[0], A.shape[1]) + xp_assert_equal(A.shape[0], B.shape[0]) + xp_assert_equal(A.shape[0], C.shape[1]) + xp_assert_equal(C.shape[0], D.shape[0]) + xp_assert_equal(B.shape[1], D.shape[1]) + + def test_zero_dimension_is_not_none1(self): + B_ = np.zeros((2, 0)) + D_ = np.zeros((0, 0)) + A, B, C, D = abcd_normalize(A=self.A, B=B_, D=D_) + xp_assert_equal(A, self.A) + 
xp_assert_equal(B, B_) + xp_assert_equal(D, D_) + assert C.shape[0] == D_.shape[0] + assert C.shape[1] == self.A.shape[0] + + def test_zero_dimension_is_not_none2(self): + B_ = np.zeros((2, 0)) + C_ = np.zeros((0, 2)) + A, B, C, D = abcd_normalize(A=self.A, B=B_, C=C_) + xp_assert_equal(A, self.A) + xp_assert_equal(B, B_) + xp_assert_equal(C, C_) + assert D.shape[0] == C_.shape[0] + assert D.shape[1] == B_.shape[1] + + def test_missing_A(self): + A, B, C, D = abcd_normalize(B=self.B, C=self.C, D=self.D) + assert A.shape[0] == A.shape[1] + assert A.shape[0] == B.shape[0] + assert A.shape == (self.B.shape[0], self.B.shape[0]) + + def test_missing_B(self): + A, B, C, D = abcd_normalize(A=self.A, C=self.C, D=self.D) + assert B.shape[0] == A.shape[0] + assert B.shape[1] == D.shape[1] + assert B.shape == (self.A.shape[0], self.D.shape[1]) + + def test_missing_C(self): + A, B, C, D = abcd_normalize(A=self.A, B=self.B, D=self.D) + assert C.shape[0] == D.shape[0] + assert C.shape[1] == A.shape[0] + assert C.shape == (self.D.shape[0], self.A.shape[0]) + + def test_missing_D(self): + A, B, C, D = abcd_normalize(A=self.A, B=self.B, C=self.C) + assert D.shape[0] == C.shape[0] + assert D.shape[1] == B.shape[1] + assert D.shape == (self.C.shape[0], self.B.shape[1]) + + def test_missing_AB(self): + A, B, C, D = abcd_normalize(C=self.C, D=self.D) + assert A.shape[0] == A.shape[1] + assert A.shape[0] == B.shape[0] + assert B.shape[1] == D.shape[1] + assert A.shape == (self.C.shape[1], self.C.shape[1]) + assert B.shape == (self.C.shape[1], self.D.shape[1]) + + def test_missing_AC(self): + A, B, C, D = abcd_normalize(B=self.B, D=self.D) + assert A.shape[0] == A.shape[1] + assert A.shape[0] == B.shape[0] + assert C.shape[0] == D.shape[0] + assert C.shape[1] == A.shape[0] + assert A.shape == (self.B.shape[0], self.B.shape[0]) + assert C.shape == (self.D.shape[0], self.B.shape[0]) + + def test_missing_AD(self): + A, B, C, D = abcd_normalize(B=self.B, C=self.C) + assert A.shape[0] == A.shape[1] + assert A.shape[0] == B.shape[0] + assert D.shape[0] == C.shape[0] + assert D.shape[1] == B.shape[1] + assert A.shape == (self.B.shape[0], self.B.shape[0]) + assert D.shape == (self.C.shape[0], self.B.shape[1]) + + def test_missing_BC(self): + A, B, C, D = abcd_normalize(A=self.A, D=self.D) + assert B.shape[0] == A.shape[0] + assert B.shape[1] == D.shape[1] + assert C.shape[0] == D.shape[0] + assert C.shape[1], A.shape[0] + assert B.shape == (self.A.shape[0], self.D.shape[1]) + assert C.shape == (self.D.shape[0], self.A.shape[0]) + + def test_missing_ABC_fails(self): + assert_raises(ValueError, abcd_normalize, D=self.D) + + def test_missing_BD_fails(self): + assert_raises(ValueError, abcd_normalize, A=self.A, C=self.C) + + def test_missing_CD_fails(self): + assert_raises(ValueError, abcd_normalize, A=self.A, B=self.B) + + +class Test_bode: + + def test_01(self): + # Test bode() magnitude calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 1 / (s + 1), + # cutoff: 1 rad/s, slope: -20 dB/decade + # H(s=0.1) ~= 0 dB + # H(s=1) ~= -3 dB + # H(s=10) ~= -20 dB + # H(s=100) ~= -40 dB + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + expected_mag = [0, -3, -20, -40] + assert_almost_equal(mag, expected_mag, decimal=1) + + def test_02(self): + # Test bode() phase calculation (manual sanity check). 
+ # 1st order low-pass filter: H(s) = 1 / (s + 1), + # angle(H(s=0.1)) ~= -5.7 deg + # angle(H(s=1)) ~= -45 deg + # angle(H(s=10)) ~= -84.3 deg + system = lti([1], [1, 1]) + w = [0.1, 1, 10] + w, mag, phase = bode(system, w=w) + expected_phase = [-5.7, -45, -84.3] + assert_almost_equal(phase, expected_phase, decimal=1) + + def test_03(self): + # Test bode() magnitude calculation. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + jw = w * 1j + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + expected_mag = 20.0 * np.log10(abs(y)) + assert_almost_equal(mag, expected_mag) + + def test_04(self): + # Test bode() phase calculation. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + jw = w * 1j + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + expected_phase = np.arctan2(y.imag, y.real) * 180.0 / np.pi + assert_almost_equal(phase, expected_phase) + + def test_05(self): + # Test that bode() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + n = 10 + # Expected range is from 0.01 to 10. + expected_w = np.logspace(-2, 1, n) + w, mag, phase = bode(system, n=n) + assert_almost_equal(w, expected_w) + + def test_06(self): + # Test that bode() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = lti([1], [1, 0]) + w, mag, phase = bode(system, n=2) + assert w[0] == 0.01 # a fail would give not-a-number + + def test_07(self): + # bode() should not fail on a system with pure imaginary poles. + # The test passes if bode doesn't raise an exception. + system = lti([1], [1, 0, 100]) + w, mag, phase = bode(system, n=2) + + def test_08(self): + # Test that bode() return continuous phase, issues/2331. + system = lti([], [-10, -30, -40, -60, -70], 1) + w, mag, phase = system.bode(w=np.logspace(-3, 40, 100)) + assert_almost_equal(min(phase), -450, decimal=15) + + def test_from_state_space(self): + # Ensure that bode works with a system that was created from the + # state space representation matrices A, B, C, D. In this case, + # system.num will be a 2-D array with shape (1, n+1), where (n,n) + # is the shape of A. + # A Butterworth lowpass filter is used, so we know the exact + # frequency response. + a = np.array([1.0, 2.0, 2.0, 1.0]) + A = linalg.companion(a).T + B = np.array([[0.0], [0.0], [1.0]]) + C = np.array([[1.0, 0.0, 0.0]]) + D = np.array([[0.0]]) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(A, B, C, D) + w, mag, phase = bode(system, n=100) + + expected_magnitude = 20 * np.log10(np.sqrt(1.0 / (1.0 + w**6))) + assert_almost_equal(mag, expected_magnitude) + + +class Test_freqresp: + + def test_output_manual(self): + # Test freqresp() output calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 1 / (s + 1), + # re(H(s=0.1)) ~= 0.99 + # re(H(s=1)) ~= 0.5 + # re(H(s=10)) ~= 0.0099 + system = lti([1], [1, 1]) + w = [0.1, 1, 10] + w, H = freqresp(system, w=w) + expected_re = [0.99, 0.5, 0.0099] + expected_im = [-0.099, -0.5, -0.099] + assert_almost_equal(H.real, expected_re, decimal=1) + assert_almost_equal(H.imag, expected_im, decimal=1) + + def test_output(self): + # Test freqresp() output calculation. 
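+        # On the imaginary axis H(jw) = 1/(1 + jw) = (1 - jw)/(1 + w**2), which is
+        # what the np.polyval evaluation below reproduces.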
+ # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, H = freqresp(system, w=w) + s = w * 1j + expected = np.polyval(system.num, s) / np.polyval(system.den, s) + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + # Expected range is from 0.01 to 10. + system = lti([1], [1, 1]) + n = 10 + expected_w = np.logspace(-2, 1, n) + w, H = freqresp(system, n=n) + assert_almost_equal(w, expected_w) + + def test_pole_zero(self): + # Test that freqresp() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = lti([1], [1, 0]) + w, H = freqresp(system, n=2) + assert w[0] == 0.01 # a fail would give not-a-number + + def test_from_state_space(self): + # Ensure that freqresp works with a system that was created from the + # state space representation matrices A, B, C, D. In this case, + # system.num will be a 2-D array with shape (1, n+1), where (n,n) is + # the shape of A. + # A Butterworth lowpass filter is used, so we know the exact + # frequency response. + a = np.array([1.0, 2.0, 2.0, 1.0]) + A = linalg.companion(a).T + B = np.array([[0.0],[0.0],[1.0]]) + C = np.array([[1.0, 0.0, 0.0]]) + D = np.array([[0.0]]) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(A, B, C, D) + w, H = freqresp(system, n=100) + s = w * 1j + expected = (1.0 / (1.0 + 2*s + 2*s**2 + s**3)) + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) + + def test_from_zpk(self): + # 4th order low-pass filter: H(s) = 1 / (s + 1) + system = lti([],[-1]*4,[1]) + w = [0.1, 1, 10, 100] + w, H = freqresp(system, w=w) + s = w * 1j + expected = 1 / (s + 1)**4 + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_max_len_seq.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_max_len_seq.py new file mode 100644 index 0000000000000000000000000000000000000000..7610b3f898571d10d75a64f00d900168c7142fbe --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_max_len_seq.py @@ -0,0 +1,71 @@ +import numpy as np +from pytest import raises as assert_raises +from scipy._lib._array_api import xp_assert_close, xp_assert_equal + +from numpy.fft import fft, ifft + +from scipy.signal import max_len_seq + + +class TestMLS: + + def test_mls_inputs(self): + # can't all be zero state + assert_raises(ValueError, max_len_seq, + 10, state=np.zeros(10)) + # wrong size state + assert_raises(ValueError, max_len_seq, 10, + state=np.ones(3)) + # wrong length + assert_raises(ValueError, max_len_seq, 10, length=-1) + xp_assert_equal(max_len_seq(10, length=0)[0], + np.asarray([], dtype=np.int8) + ) + # unknown taps + assert_raises(ValueError, max_len_seq, 64) + # bad taps + assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1]) + + def test_mls_output(self): + # define some alternate working taps + alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4], + 8: [7, 5, 3]} + # assume the other bit levels work, too slow to test higher orders... 
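+        # An order-n maximal length sequence has period 2**n - 1 and, in its +/-1
+        # representation, a circular autocorrelation of 2**n - 1 at zero lag and
+        # -1 at every other lag; the FFT-based check below relies on this.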
+ for nbits in range(2, 8): + for state in [None, np.round(np.random.rand(nbits))]: + for taps in [None, alt_taps[nbits]]: + if state is not None and np.all(state == 0): + state[0] = 1 # they can't all be zero + orig_m = max_len_seq(nbits, state=state, + taps=taps)[0] + m = 2. * orig_m - 1. # convert to +/- 1 representation + # First, make sure we got all 1's or -1 + err_msg = "mls had non binary terms" + xp_assert_equal(np.abs(m), np.ones_like(m), + err_msg=err_msg) + # Test via circular cross-correlation, which is just mult. + # in the frequency domain with one signal conjugated + tester = np.real(ifft(fft(m) * np.conj(fft(m)))) + out_len = 2**nbits - 1 + # impulse amplitude == test_len + err_msg = "mls impulse has incorrect value" + xp_assert_close(tester[0], + float(out_len), + err_msg=err_msg + ) + # steady-state is -1 + err_msg = "mls steady-state has incorrect value" + xp_assert_close(tester[1:], + np.full(out_len - 1, -1, dtype=tester.dtype), + err_msg=err_msg) + # let's do the split thing using a couple options + for n in (1, 2**(nbits - 1)): + m1, s1 = max_len_seq(nbits, state=state, taps=taps, + length=n) + m2, s2 = max_len_seq(nbits, state=s1, taps=taps, + length=1) + m3, s3 = max_len_seq(nbits, state=s2, taps=taps, + length=out_len - n - 1) + new_m = np.concatenate((m1, m2, m3)) + xp_assert_equal(orig_m, new_m) + diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_peak_finding.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_peak_finding.py new file mode 100644 index 0000000000000000000000000000000000000000..8de5a2379c2d43c0a10b5b3facd0b33be0778a36 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_peak_finding.py @@ -0,0 +1,915 @@ +import copy + +import numpy as np +import pytest +from pytest import raises, warns +from scipy._lib._array_api import xp_assert_close, xp_assert_equal + +from scipy.signal._peak_finding import ( + argrelmax, + argrelmin, + peak_prominences, + peak_widths, + _unpack_condition_args, + find_peaks, + find_peaks_cwt, + _identify_ridge_lines +) +from scipy.signal.windows import gaussian +from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning + + +def _gen_gaussians(center_locs, sigmas, total_length): + xdata = np.arange(0, total_length).astype(float) + out_data = np.zeros(total_length, dtype=float) + for ind, sigma in enumerate(sigmas): + tmp = (xdata - center_locs[ind]) / sigma + out_data += np.exp(-(tmp**2)) + return out_data + + +def _gen_gaussians_even(sigmas, total_length): + num_peaks = len(sigmas) + delta = total_length / (num_peaks + 1) + center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int) + out_data = _gen_gaussians(center_locs, sigmas, total_length) + return out_data, center_locs + + +def _gen_ridge_line(start_locs, max_locs, length, distances, gaps): + """ + Generate coordinates for a ridge line. + + Will be a series of coordinates, starting a start_loc (length 2). + The maximum distance between any adjacent columns will be + `max_distance`, the max distance between adjacent rows + will be `map_gap'. + + `max_locs` should be the size of the intended matrix. The + ending coordinates are guaranteed to be less than `max_locs`, + although they may not approach `max_locs` at all. 
+ """ + + def keep_bounds(num, max_val): + out = max(num, 0) + out = min(out, max_val) + return out + + gaps = copy.deepcopy(gaps) + distances = copy.deepcopy(distances) + + locs = np.zeros([length, 2], dtype=int) + locs[0, :] = start_locs + total_length = max_locs[0] - start_locs[0] - sum(gaps) + if total_length < length: + raise ValueError('Cannot generate ridge line according to constraints') + dist_int = length / len(distances) - 1 + gap_int = length / len(gaps) - 1 + for ind in range(1, length): + nextcol = locs[ind - 1, 1] + nextrow = locs[ind - 1, 0] + 1 + if (ind % dist_int == 0) and (len(distances) > 0): + nextcol += ((-1)**ind)*distances.pop() + if (ind % gap_int == 0) and (len(gaps) > 0): + nextrow += gaps.pop() + nextrow = keep_bounds(nextrow, max_locs[0]) + nextcol = keep_bounds(nextcol, max_locs[1]) + locs[ind, :] = [nextrow, nextcol] + + return [locs[:, 0], locs[:, 1]] + + +class TestLocalMaxima1d: + + def test_empty(self): + """Test with empty signal.""" + x = np.array([], dtype=np.float64) + for array in _local_maxima_1d(x): + xp_assert_equal(array, np.array([]), check_dtype=False) + assert array.base is None + + def test_linear(self): + """Test with linear signal.""" + x = np.linspace(0, 100) + for array in _local_maxima_1d(x): + xp_assert_equal(array, np.array([], dtype=np.intp)) + assert array.base is None + + def test_simple(self): + """Test with simple signal.""" + x = np.linspace(-10, 10, 50) + x[2::3] += 1 + expected = np.arange(2, 50, 3, dtype=np.intp) + for array in _local_maxima_1d(x): + # For plateaus of size 1, the edges are identical with the + # midpoints + xp_assert_equal(array, expected, check_dtype=False) + assert array.base is None + + def test_flat_maxima(self): + """Test if flat maxima are detected correctly.""" + x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10, + -5, -5, -5, -5, -5, -10]) + midpoints, left_edges, right_edges = _local_maxima_1d(x) + xp_assert_equal(midpoints, np.array([2, 4, 8, 12, 18]), check_dtype=False) + xp_assert_equal(left_edges, np.array([2, 4, 7, 11, 16]), check_dtype=False) + xp_assert_equal(right_edges, np.array([2, 5, 9, 14, 20]), check_dtype=False) + + @pytest.mark.parametrize('x', [ + np.array([1., 0, 2]), + np.array([3., 3, 0, 4, 4]), + np.array([5., 5, 5, 0, 6, 6, 6]), + ]) + def test_signal_edges(self, x): + """Test if behavior on signal edges is correct.""" + for array in _local_maxima_1d(x): + xp_assert_equal(array, np.array([], dtype=np.intp)) + assert array.base is None + + def test_exceptions(self): + """Test input validation and raised exceptions.""" + with raises(ValueError, match="wrong number of dimensions"): + _local_maxima_1d(np.ones((1, 1))) + with raises(ValueError, match="expected 'const float64_t'"): + _local_maxima_1d(np.ones(1, dtype=int)) + with raises(TypeError, match="list"): + _local_maxima_1d([1., 2.]) + with raises(TypeError, match="'x' must not be None"): + _local_maxima_1d(None) + + +class TestRidgeLines: + + def test_empty(self): + test_matr = np.zeros([20, 100]) + lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) + assert len(lines) == 0 + + def test_minimal(self): + test_matr = np.zeros([20, 100]) + test_matr[0, 10] = 1 + lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) + assert len(lines) == 1 + + test_matr = np.zeros([20, 100]) + test_matr[0:2, 10] = 1 + lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) + assert len(lines) == 1 + + def test_single_pass(self): + distances = [0, 1, 2, 5] + gaps = [0, 1, 2, 0, 1] + test_matr = np.zeros([20, 
50]) + 1e-12 + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_distances = np.full(20, max(distances)) + identified_lines = _identify_ridge_lines(test_matr, + max_distances, + max(gaps) + 1) + assert len(identified_lines) == 1 + for iline_, line_ in zip(identified_lines[0], line): + xp_assert_equal(iline_, line_, check_dtype=False) + + def test_single_bigdist(self): + distances = [0, 1, 2, 5] + gaps = [0, 1, 2, 4] + test_matr = np.zeros([20, 50]) + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 3 + max_distances = np.full(20, max_dist) + #This should get 2 lines, since the distance is too large + identified_lines = _identify_ridge_lines(test_matr, + max_distances, + max(gaps) + 1) + assert len(identified_lines) == 2 + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + def test_single_biggap(self): + distances = [0, 1, 2, 5] + max_gap = 3 + gaps = [0, 4, 2, 1] + test_matr = np.zeros([20, 50]) + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 6 + max_distances = np.full(20, max_dist) + #This should get 2 lines, since the gap is too large + identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) + assert len(identified_lines) == 2 + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + def test_single_biggaps(self): + distances = [0] + max_gap = 1 + gaps = [3, 6] + test_matr = np.zeros([50, 50]) + length = 30 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 1 + max_distances = np.full(50, max_dist) + #This should get 3 lines, since the gaps are too large + identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) + assert len(identified_lines) == 3 + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + +class TestArgrel: + + def test_empty(self): + # Regression test for gh-2832. + # When there are no relative extrema, make sure that + # the number of empty arrays returned matches the + # dimension of the input. + + empty_array = np.array([], dtype=int) + + z1 = np.zeros(5) + + i = argrelmin(z1) + xp_assert_equal(len(i), 1) + xp_assert_equal(i[0], empty_array, check_dtype=False) + + z2 = np.zeros((3, 5)) + + row, col = argrelmin(z2, axis=0) + xp_assert_equal(row, empty_array, check_dtype=False) + xp_assert_equal(col, empty_array, check_dtype=False) + + row, col = argrelmin(z2, axis=1) + xp_assert_equal(row, empty_array, check_dtype=False) + xp_assert_equal(col, empty_array, check_dtype=False) + + def test_basic(self): + # Note: the docstrings for the argrel{min,max,extrema} functions + # do not give a guarantee of the order of the indices, so we'll + # sort them before testing. 
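+        # argrelmax/argrelmin use strict comparisons (np.greater/np.less), so the
+        # flat runs in the array below are not reported as extrema.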
+ + x = np.array([[1, 2, 2, 3, 2], + [2, 1, 2, 2, 3], + [3, 2, 1, 2, 2], + [2, 3, 2, 1, 2], + [1, 2, 3, 2, 1]]) + + row, col = argrelmax(x, axis=0) + order = np.argsort(row) + xp_assert_equal(row[order], [1, 2, 3], check_dtype=False) + xp_assert_equal(col[order], [4, 0, 1], check_dtype=False) + + row, col = argrelmax(x, axis=1) + order = np.argsort(row) + xp_assert_equal(row[order], [0, 3, 4], check_dtype=False) + xp_assert_equal(col[order], [3, 1, 2], check_dtype=False) + + row, col = argrelmin(x, axis=0) + order = np.argsort(row) + xp_assert_equal(row[order], [1, 2, 3], check_dtype=False) + xp_assert_equal(col[order], [1, 2, 3], check_dtype=False) + + row, col = argrelmin(x, axis=1) + order = np.argsort(row) + xp_assert_equal(row[order], [1, 2, 3], check_dtype=False) + xp_assert_equal(col[order], [1, 2, 3], check_dtype=False) + + def test_highorder(self): + order = 2 + sigmas = [1.0, 2.0, 10.0, 5.0, 15.0] + test_data, act_locs = _gen_gaussians_even(sigmas, 500) + test_data[act_locs + order] = test_data[act_locs]*0.99999 + test_data[act_locs - order] = test_data[act_locs]*0.99999 + rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0] + + assert len(rel_max_locs) == len(act_locs) + assert (rel_max_locs == act_locs).all() + + def test_2d_gaussians(self): + sigmas = [1.0, 2.0, 10.0] + test_data, act_locs = _gen_gaussians_even(sigmas, 100) + rot_factor = 20 + rot_range = np.arange(0, len(test_data)) - rot_factor + test_data_2 = np.vstack([test_data, test_data[rot_range]]) + rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1) + + for rw in range(0, test_data_2.shape[0]): + inds = (rel_max_rows == rw) + + assert len(rel_max_cols[inds]) == len(act_locs) + assert (act_locs == (rel_max_cols[inds] - rot_factor*rw)).all() + + +class TestPeakProminences: + + def test_empty(self): + """ + Test if an empty array is returned if no peaks are provided. + """ + out = peak_prominences([1, 2, 3], []) + for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): + assert arr.size == 0 + assert arr.dtype == dtype + + out = peak_prominences([], []) + for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): + assert arr.size == 0 + assert arr.dtype == dtype + + def test_basic(self): + """ + Test if height of prominences is correctly calculated in signal with + rising baseline (peak widths are 1 sample). + """ + # Prepare basic signal + x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1]) + peaks = np.array([1, 2, 4, 6]) + lbases = np.array([0, 0, 0, 5]) + rbases = np.array([3, 3, 5, 7]) + proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0) + # Test if calculation matches handcrafted result + out = peak_prominences(x, peaks) + xp_assert_equal(out[0], proms, check_dtype=False) + xp_assert_equal(out[1], lbases, check_dtype=False) + xp_assert_equal(out[2], rbases, check_dtype=False) + + def test_edge_cases(self): + """ + Test edge cases. 
+ """ + # Peaks have same height, prominence and bases + x = [0, 2, 1, 2, 1, 2, 0] + peaks = [1, 3, 5] + proms, lbases, rbases = peak_prominences(x, peaks) + xp_assert_equal(proms, np.asarray([2.0, 2, 2]), check_dtype=False) + xp_assert_equal(lbases, [0, 0, 0], check_dtype=False) + xp_assert_equal(rbases, [6, 6, 6], check_dtype=False) + + # Peaks have same height & prominence but different bases + x = [0, 1, 0, 1, 0, 1, 0] + peaks = np.array([1, 3, 5]) + proms, lbases, rbases = peak_prominences(x, peaks) + xp_assert_equal(proms, np.asarray([1.0, 1, 1])) + xp_assert_equal(lbases, peaks - 1, check_dtype=False) + xp_assert_equal(rbases, peaks + 1, check_dtype=False) + + def test_non_contiguous(self): + """ + Test with non-C-contiguous input arrays. + """ + x = np.repeat([-9, 9, 9, 0, 3, 1], 2) + peaks = np.repeat([1, 2, 4], 2) + proms, lbases, rbases = peak_prominences(x[::2], peaks[::2]) + xp_assert_equal(proms, np.asarray([9.0, 9, 2])) + xp_assert_equal(lbases, [0, 0, 3], check_dtype=False) + xp_assert_equal(rbases, [3, 3, 5], check_dtype=False) + + def test_wlen(self): + """ + Test if wlen actually shrinks the evaluation range correctly. + """ + x = [0, 1, 2, 3, 1, 0, -1] + peak = [3] + # Test rounding behavior of wlen + proms = peak_prominences(x, peak) + for prom, val in zip(proms, [3.0, 0, 6]): + assert prom == val + + for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]: + proms = peak_prominences(x, peak, wlen) + for prom, val in zip(proms, [3. - i, 0 + i, 6 - i]): + assert prom == val + + def test_exceptions(self): + """ + Verify that exceptions and warnings are raised. + """ + # x with dimension > 1 + with raises(ValueError, match='1-D array'): + peak_prominences([[0, 1, 1, 0]], [1, 2]) + # peaks with dimension > 1 + with raises(ValueError, match='1-D array'): + peak_prominences([0, 1, 1, 0], [[1, 2]]) + # x with dimension < 1 + with raises(ValueError, match='1-D array'): + peak_prominences(3, [0,]) + + # empty x with supplied + with raises(ValueError, match='not a valid index'): + peak_prominences([], [0]) + # invalid indices with non-empty x + for p in [-100, -1, 3, 1000]: + with raises(ValueError, match='not a valid index'): + peak_prominences([1, 0, 2], [p]) + + # peaks is not cast-able to np.intp + with raises(TypeError, match='cannot safely cast'): + peak_prominences([0, 1, 1, 0], [1.1, 2.3]) + + # wlen < 3 + with raises(ValueError, match='wlen'): + peak_prominences(np.arange(10), [3, 5], wlen=1) + + @pytest.mark.thread_unsafe + def test_warnings(self): + """ + Verify that appropriate warnings are raised. + """ + msg = "some peaks have a prominence of 0" + for p in [0, 1, 2]: + with warns(PeakPropertyWarning, match=msg): + peak_prominences([1, 0, 2], [p,]) + with warns(PeakPropertyWarning, match=msg): + peak_prominences([0, 1, 1, 1, 0], [2], wlen=2) + + +class TestPeakWidths: + + def test_empty(self): + """ + Test if an empty array is returned if no peaks are provided. + """ + widths = peak_widths([], [])[0] + assert isinstance(widths, np.ndarray) + assert widths.size == 0 + widths = peak_widths([1, 2, 3], [])[0] + assert isinstance(widths, np.ndarray) + assert widths.size == 0 + out = peak_widths([], []) + for arr in out: + assert isinstance(arr, np.ndarray) + assert arr.size == 0 + + @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") + def test_basic(self): + """ + Test a simple use case with easy to verify results at different relative + heights. 
+ """ + x = np.array([1, 0, 1, 2, 1, 0, -1]) + prominence = 2 + for rel_height, width_true, lip_true, rip_true in [ + (0., 0., 3., 3.), # raises warning + (0.25, 1., 2.5, 3.5), + (0.5, 2., 2., 4.), + (0.75, 3., 1.5, 4.5), + (1., 4., 1., 5.), + (2., 5., 1., 6.), + (3., 5., 1., 6.) + ]: + width_calc, height, lip_calc, rip_calc = peak_widths( + x, [3], rel_height) + xp_assert_close(width_calc, np.asarray([width_true])) + xp_assert_close(height, np.asarray([2 - rel_height * prominence])) + xp_assert_close(lip_calc, np.asarray([lip_true])) + xp_assert_close(rip_calc, np.asarray([rip_true])) + + def test_non_contiguous(self): + """ + Test with non-C-contiguous input arrays. + """ + x = np.repeat([0, 100, 50], 4) + peaks = np.repeat([1], 3) + result = peak_widths(x[::4], peaks[::3]) + xp_assert_equal(result, + np.asarray([[0.75], [75], [0.75], [1.5]]) + ) + + def test_exceptions(self): + """ + Verify that argument validation works as intended. + """ + with raises(ValueError, match='1-D array'): + # x with dimension > 1 + peak_widths(np.zeros((3, 4)), np.ones(3)) + with raises(ValueError, match='1-D array'): + # x with dimension < 1 + peak_widths(3, [0]) + with raises(ValueError, match='1-D array'): + # peaks with dimension > 1 + peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp)) + with raises(ValueError, match='1-D array'): + # peaks with dimension < 1 + peak_widths(np.arange(10), 3) + with raises(ValueError, match='not a valid index'): + # peak pos exceeds x.size + peak_widths(np.arange(10), [8, 11]) + with raises(ValueError, match='not a valid index'): + # empty x with peaks supplied + peak_widths([], [1, 2]) + with raises(TypeError, match='cannot safely cast'): + # peak cannot be safely cast to intp + peak_widths(np.arange(10), [1.1, 2.3]) + with raises(ValueError, match='rel_height'): + # rel_height is < 0 + peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1) + with raises(TypeError, match='None'): + # prominence data contains None + peak_widths([1, 2, 1], [1], prominence_data=(None, None, None)) + + @pytest.mark.thread_unsafe + def test_warnings(self): + """ + Verify that appropriate warnings are raised. 
+ """ + msg = "some peaks have a width of 0" + with warns(PeakPropertyWarning, match=msg): + # Case: rel_height is 0 + peak_widths([0, 1, 0], [1], rel_height=0) + with warns(PeakPropertyWarning, match=msg): + # Case: prominence is 0 and bases are identical + peak_widths( + [0, 1, 1, 1, 0], [2], + prominence_data=(np.array([0.], np.float64), + np.array([2], np.intp), + np.array([2], np.intp)) + ) + + def test_mismatching_prominence_data(self): + """Test with mismatching peak and / or prominence data.""" + x = [0, 1, 0] + peak = [1] + for i, (prominences, left_bases, right_bases) in enumerate([ + ((1.,), (-1,), (2,)), # left base not in x + ((1.,), (0,), (3,)), # right base not in x + ((1.,), (2,), (0,)), # swapped bases same as peak + ((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks + ((1., 1.), (0,), (2,)), # arrays with different shapes + ((1.,), (0, 0), (2,)), # arrays with different shapes + ((1.,), (0,), (2, 2)) # arrays with different shapes + ]): + # Make sure input is matches output of signal.peak_prominences + prominence_data = (np.array(prominences, dtype=np.float64), + np.array(left_bases, dtype=np.intp), + np.array(right_bases, dtype=np.intp)) + # Test for correct exception + if i < 3: + match = "prominence data is invalid for peak" + else: + match = "arrays in `prominence_data` must have the same shape" + with raises(ValueError, match=match): + peak_widths(x, peak, prominence_data=prominence_data) + + @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") + def test_intersection_rules(self): + """Test if x == eval_height counts as an intersection.""" + # Flatt peak with two possible intersection points if evaluated at 1 + x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0] + # relative height is 0 -> width is 0 as well, raises warning + xp_assert_close(peak_widths(x, peaks=[5], rel_height=0), + [(0.,), (3.,), (5.,), (5.,)]) + # width_height == x counts as intersection -> nearest 1 is chosen + xp_assert_close(peak_widths(x, peaks=[5], rel_height=2/3), + [(4.,), (1.,), (3.,), (7.,)]) + + +def test_unpack_condition_args(): + """ + Verify parsing of condition arguments for `scipy.signal.find_peaks` function. + """ + x = np.arange(10) + amin_true = x + amax_true = amin_true + 10 + peaks = amin_true[1::2] + + # Test unpacking with None or interval + assert (None, None) == _unpack_condition_args((None, None), x, peaks) + assert (1, None) == _unpack_condition_args(1, x, peaks) + assert (1, None) == _unpack_condition_args((1, None), x, peaks) + assert (None, 2) == _unpack_condition_args((None, 2), x, peaks) + assert (3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks) + + # Test if borders are correctly reduced with `peaks` + amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks) + xp_assert_equal(amin_calc, amin_true[peaks]) + xp_assert_equal(amax_calc, amax_true[peaks]) + + # Test raises if array borders don't match x + with raises(ValueError, match="array size of lower"): + _unpack_condition_args(amin_true, np.arange(11), peaks) + with raises(ValueError, match="array size of upper"): + _unpack_condition_args((None, amin_true), np.arange(11), peaks) + + +class TestFindPeaks: + + # Keys of optionally returned properties + property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds', + 'prominences', 'left_bases', 'right_bases', 'widths', + 'width_heights', 'left_ips', 'right_ips'} + + def test_constant(self): + """ + Test behavior for signal without local maxima. 
+ """ + open_interval = (None, None) + peaks, props = find_peaks(np.ones(10), + height=open_interval, threshold=open_interval, + prominence=open_interval, width=open_interval) + assert peaks.size == 0 + for key in self.property_keys: + assert props[key].size == 0 + + def test_plateau_size(self): + """ + Test plateau size condition for peaks. + """ + # Prepare signal with peaks with peak_height == plateau_size + plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111]) + x = np.zeros(plateau_sizes.size * 2 + 1) + x[1::2] = plateau_sizes + repeats = np.ones(x.size, dtype=int) + repeats[1::2] = x[1::2] + x = np.repeat(x, repeats) + + # Test full output + peaks, props = find_peaks(x, plateau_size=(None, None)) + xp_assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100], check_dtype=False) + xp_assert_equal(props["plateau_sizes"], plateau_sizes, check_dtype=False) + xp_assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2, + check_dtype=False) + xp_assert_equal(props["right_edges"], peaks + plateau_sizes // 2, + check_dtype=False) + + # Test conditions + xp_assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100], + check_dtype=False) + xp_assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7], + check_dtype=False) + xp_assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33], + check_dtype=False) + + def test_height_condition(self): + """ + Test height condition for peaks. + """ + x = (0., 1/3, 0., 2.5, 0, 4., 0) + peaks, props = find_peaks(x, height=(None, None)) + xp_assert_equal(peaks, np.array([1, 3, 5]), check_dtype=False) + xp_assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.]), + check_dtype=False) + xp_assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5]), + check_dtype=False) + xp_assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3]), + check_dtype=False) + xp_assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3]), + check_dtype=False) + + def test_threshold_condition(self): + """ + Test threshold condition for peaks. + """ + x = (0, 2, 1, 4, -1) + peaks, props = find_peaks(x, threshold=(None, None)) + xp_assert_equal(peaks, np.array([1, 3]), check_dtype=False) + xp_assert_equal(props['left_thresholds'], np.array([2.0, 3.0])) + xp_assert_equal(props['right_thresholds'], np.array([1.0, 5.0])) + xp_assert_equal(find_peaks(x, threshold=2)[0], np.array([3]), + check_dtype=False) + xp_assert_equal(find_peaks(x, threshold=3.5)[0], np.array([], dtype=int), + check_dtype=False) + xp_assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3]), + check_dtype=False) + xp_assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1]), + check_dtype=False) + xp_assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([], dtype=int), + check_dtype=False) + + def test_distance_condition(self): + """ + Test distance condition for peaks. 
+ """ + # Peaks of different height with constant distance 3 + peaks_all = np.arange(1, 21, 3) + x = np.zeros(21) + x[peaks_all] += np.linspace(1, 2, peaks_all.size) + + # Test if peaks with "minimal" distance are still selected (distance = 3) + xp_assert_equal(find_peaks(x, distance=3)[0], peaks_all, check_dtype=False) + + # Select every second peak (distance > 3) + peaks_subset = find_peaks(x, distance=3.0001)[0] + # Test if peaks_subset is subset of peaks_all + assert np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0 + + # Test if every second peak was removed + dfs = np.diff(peaks_subset) + xp_assert_equal(dfs, 6*np.ones_like(dfs)) + + # Test priority of peak removal + x = [-2, 1, -1, 0, -3] + peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size + assert peaks_subset.size == 1 and peaks_subset[0] == 1 + + def test_prominence_condition(self): + """ + Test prominence condition for peaks. + """ + x = np.linspace(0, 10, 100) + peaks_true = np.arange(1, 99, 2) + offset = np.linspace(1, 10, peaks_true.size) + x[peaks_true] += offset + prominences = x[peaks_true] - x[peaks_true + 1] + interval = (3, 9) + keep = np.nonzero( + (interval[0] <= prominences) & (prominences <= interval[1])) + + peaks_calc, properties = find_peaks(x, prominence=interval) + xp_assert_equal(peaks_calc, peaks_true[keep], check_dtype=False) + xp_assert_equal(properties['prominences'], prominences[keep], check_dtype=False) + xp_assert_equal(properties['left_bases'], + np.zeros_like(properties['left_bases'])) + xp_assert_equal(properties['right_bases'], peaks_true[keep] + 1, + check_dtype=False) + + def test_width_condition(self): + """ + Test width condition for peaks. + """ + x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0]) + peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75) + assert peaks.size == 1 + xp_assert_equal(peaks, 7*np.ones_like(peaks)) + xp_assert_close(props['widths'], np.asarray([1.35])) + xp_assert_close(props['width_heights'], np.asarray([1.])) + xp_assert_close(props['left_ips'], np.asarray([6.4])) + xp_assert_close(props['right_ips'], np.asarray([7.75])) + + def test_properties(self): + """ + Test returned properties. + """ + open_interval = (None, None) + x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9] + peaks, props = find_peaks(x, + height=open_interval, threshold=open_interval, + prominence=open_interval, width=open_interval) + assert len(props) == len(self.property_keys) + for key in self.property_keys: + assert peaks.size == props[key].size + + def test_raises(self): + """ + Test exceptions raised by function. + """ + with raises(ValueError, match="1-D array"): + find_peaks(np.array(1)) + with raises(ValueError, match="1-D array"): + find_peaks(np.ones((2, 2))) + with raises(ValueError, match="distance"): + find_peaks(np.arange(10), distance=-1) + + @pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0", + "ignore:some peaks have a width of 0") + def test_wlen_smaller_plateau(self): + """ + Test behavior of prominence and width calculation if the given window + length is smaller than a peak's plateau size. + + Regression test for gh-9110. 
+        """
+        peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None),
+                                  width=(None, None), wlen=2)
+        xp_assert_equal(peaks, 2 * np.ones_like(peaks))
+        xp_assert_equal(props["prominences"], np.zeros_like(props["prominences"]))
+        xp_assert_equal(props["widths"], np.zeros_like(props["widths"]))
+        xp_assert_equal(props["width_heights"], np.ones_like(props["width_heights"]))
+        for key in ("left_bases", "right_bases", "left_ips", "right_ips"):
+            xp_assert_equal(props[key], peaks, check_dtype=False)
+
+    @pytest.mark.parametrize("kwargs", [
+        {},
+        {"distance": 3.0},
+        {"prominence": (None, None)},
+        {"width": (None, 2)},
+    ])
+    def test_readonly_array(self, kwargs):
+        """
+        Test that readonly arrays are accepted.
+        """
+        x = np.linspace(0, 10, 15)
+        x_readonly = x.copy()
+        x_readonly.flags.writeable = False
+
+        peaks, _ = find_peaks(x)
+        peaks_readonly, _ = find_peaks(x_readonly, **kwargs)
+
+        xp_assert_close(peaks, peaks_readonly)
+
+
+class TestFindPeaksCwt:
+
+    def test_find_peaks_exact(self):
+        """
+        Generate a series of gaussians and attempt to find the peak locations.
+        """
+        sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
+        num_points = 500
+        test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
+        widths = np.arange(0.1, max(sigmas))
+        found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,
+                                    min_length=None)
+        xp_assert_equal(found_locs, act_locs,
+                        check_dtype=False,
+                        err_msg="Found maximum locations did not equal those expected"
+                        )
+
+    def test_find_peaks_withnoise(self):
+        """
+        Verify that peak locations are (approximately) found
+        for a series of gaussians with added noise.
+        """
+        sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
+        num_points = 500
+        test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
+        widths = np.arange(0.1, max(sigmas))
+        noise_amp = 0.07
+        np.random.seed(18181911)
+        test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
+        found_locs = find_peaks_cwt(test_data, widths, min_length=15,
+                                    gap_thresh=1, min_snr=noise_amp / 5)
+
+        err_msg = 'Different number of peaks found than expected'
+        assert len(found_locs) == len(act_locs), err_msg
+        diffs = np.abs(found_locs - act_locs)
+        max_diffs = np.array(sigmas) / 5
+        np.testing.assert_array_less(diffs, max_diffs,
+                                     f'Maximum location differed by more than {max_diffs}')
+
+    def test_find_peaks_nopeak(self):
+        """
+        Verify that no peak is found in
+        data that's just noise.
+        """
+        noise_amp = 1.0
+        num_points = 100
+        rng = np.random.RandomState(181819141)
+        test_data = (rng.rand(num_points) - 0.5)*(2*noise_amp)
+        widths = np.arange(10, 50)
+        found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)
+        assert len(found_locs) == 0
+
+    def test_find_peaks_with_non_default_wavelets(self):
+        x = gaussian(200, 2)
+        widths = np.array([1, 2, 3, 4])
+        a = find_peaks_cwt(x, widths, wavelet=gaussian)
+
+        xp_assert_equal(a, np.asarray([100]), check_dtype=False)
+
+    def test_find_peaks_window_size(self):
+        """
+        Verify that window_size is passed correctly to the private function and
+        affects the result.
+ """ + sigmas = [2.0, 2.0] + num_points = 1000 + test_data, act_locs = _gen_gaussians_even(sigmas, num_points) + widths = np.arange(0.1, max(sigmas), 0.2) + noise_amp = 0.05 + rng = np.random.RandomState(18181911) + test_data += (rng.rand(num_points) - 0.5)*(2*noise_amp) + + # Possibly contrived negative region to throw off peak finding + # when window_size is too large + test_data[250:320] -= 1 + + found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3, + min_length=None, window_size=None) + with pytest.raises(AssertionError): + assert found_locs.size == act_locs.size + + found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3, + min_length=None, window_size=20) + assert found_locs.size == act_locs.size + + def test_find_peaks_with_one_width(self): + """ + Verify that the `width` argument + in `find_peaks_cwt` can be a float + """ + xs = np.arange(0, np.pi, 0.05) + test_data = np.sin(xs) + widths = 1 + found_locs = find_peaks_cwt(test_data, widths) + + np.testing.assert_equal(found_locs, 32) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_result_type.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_result_type.py new file mode 100644 index 0000000000000000000000000000000000000000..a2cadd325a7e36c877df8532ba957712831c2dad --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_result_type.py @@ -0,0 +1,51 @@ +# Regressions tests on result types of some signal functions + +import numpy as np + +from scipy.signal import (decimate, + lfilter_zi, + lfiltic, + sos2tf, + sosfilt_zi) + + +def test_decimate(): + ones_f32 = np.ones(32, dtype=np.float32) + assert decimate(ones_f32, 2).dtype == np.float32 + + ones_i64 = np.ones(32, dtype=np.int64) + assert decimate(ones_i64, 2).dtype == np.float64 + + +def test_lfilter_zi(): + b_f32 = np.array([1, 2, 3], dtype=np.float32) + a_f32 = np.array([4, 5, 6], dtype=np.float32) + assert lfilter_zi(b_f32, a_f32).dtype == np.float32 + + +def test_lfiltic(): + # this would return f32 when given a mix of f32 / f64 args + b_f32 = np.array([1, 2, 3], dtype=np.float32) + a_f32 = np.array([4, 5, 6], dtype=np.float32) + x_f32 = np.ones(32, dtype=np.float32) + + b_f64 = b_f32.astype(np.float64) + a_f64 = a_f32.astype(np.float64) + x_f64 = x_f32.astype(np.float64) + + assert lfiltic(b_f64, a_f32, x_f32).dtype == np.float64 + assert lfiltic(b_f32, a_f64, x_f32).dtype == np.float64 + assert lfiltic(b_f32, a_f32, x_f64).dtype == np.float64 + assert lfiltic(b_f32, a_f32, x_f32, x_f64).dtype == np.float64 + + +def test_sos2tf(): + sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32) + b, a = sos2tf(sos_f32) + assert b.dtype == np.float32 + assert a.dtype == np.float32 + + +def test_sosfilt_zi(): + sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32) + assert sosfilt_zi(sos_f32).dtype == np.float32 diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_savitzky_golay.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_savitzky_golay.py new file mode 100644 index 0000000000000000000000000000000000000000..61d958e35d91b7d537e0bd3551b6cec3f45a4983 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_savitzky_golay.py @@ -0,0 +1,362 @@ +import pytest +import numpy as np +from numpy.testing import (assert_equal, + assert_array_equal, +) + +from scipy._lib._array_api import ( + assert_almost_equal, assert_array_almost_equal, xp_assert_close +) + +from scipy.ndimage import convolve1d 
# type: ignore[attr-defined] + +from scipy.signal import savgol_coeffs, savgol_filter +from scipy.signal._savitzky_golay import _polyder + + +def check_polyder(p, m, expected): + dp = _polyder(p, m) + assert_array_equal(dp, expected) + + +def test_polyder(): + cases = [ + ([5], 0, [5]), + ([5], 1, [0]), + ([3, 2, 1], 0, [3, 2, 1]), + ([3, 2, 1], 1, [6, 2]), + ([3, 2, 1], 2, [6]), + ([3, 2, 1], 3, [0]), + ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]), + ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]), + ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]), + ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]), + ] + for p, m, expected in cases: + check_polyder(np.array(p).T, m, np.array(expected).T) + + +#-------------------------------------------------------------------- +# savgol_coeffs tests +#-------------------------------------------------------------------- + +def alt_sg_coeffs(window_length, polyorder, pos): + """This is an alternative implementation of the SG coefficients. + + It uses numpy.polyfit and numpy.polyval. The results should be + equivalent to those of savgol_coeffs(), but this implementation + is slower. + + window_length should be odd. + + """ + if pos is None: + pos = window_length // 2 + t = np.arange(window_length) + unit = (t == pos).astype(int) + h = np.polyval(np.polyfit(t, unit, polyorder), t) + return h + + +def test_sg_coeffs_trivial(): + # Test a trivial case of savgol_coeffs: polyorder = window_length - 1 + h = savgol_coeffs(1, 0) + xp_assert_close(h, [1.0]) + + h = savgol_coeffs(3, 2) + xp_assert_close(h, [0.0, 1, 0], atol=1e-10) + + h = savgol_coeffs(5, 4) + xp_assert_close(h, [0.0, 0, 1, 0, 0], atol=1e-10) + + h = savgol_coeffs(5, 4, pos=1) + xp_assert_close(h, [0.0, 0, 0, 1, 0], atol=1e-10) + + h = savgol_coeffs(5, 4, pos=1, use='dot') + xp_assert_close(h, [0.0, 1, 0, 0, 0], atol=1e-10) + + +def compare_coeffs_to_alt(window_length, order): + # For the given window_length and order, compare the results + # of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1. + # Also include pos=None. + for pos in [None] + list(range(window_length)): + h1 = savgol_coeffs(window_length, order, pos=pos, use='dot') + h2 = alt_sg_coeffs(window_length, order, pos=pos) + xp_assert_close(h1, h2, atol=1e-10, + err_msg=("window_length = %d, order = %d, pos = %s" % + (window_length, order, pos))) + + +def test_sg_coeffs_compare(): + # Compare savgol_coeffs() to alt_sg_coeffs(). + for window_length in range(1, 8, 2): + for order in range(window_length): + compare_coeffs_to_alt(window_length, order) + + +def test_sg_coeffs_exact(): + polyorder = 4 + window_length = 9 + halflen = window_length // 2 + + x = np.linspace(0, 21, 43) + delta = x[1] - x[0] + + # The data is a cubic polynomial. We'll use an order 4 + # SG filter, so the filtered values should equal the input data + # (except within half window_length of the edges). + y = 0.5 * x ** 3 - x + h = savgol_coeffs(window_length, polyorder) + y0 = convolve1d(y, h) + xp_assert_close(y0[halflen:-halflen], y[halflen:-halflen]) + + # Check the same input, but use deriv=1. dy is the exact result. + dy = 1.5 * x ** 2 - 1 + h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta) + y1 = convolve1d(y, h) + xp_assert_close(y1[halflen:-halflen], dy[halflen:-halflen]) + + # Check the same input, but use deriv=2. d2y is the exact result. 
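+    # The exact second derivative of y = 0.5 * x**3 - x is 3 * x: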
+ d2y = 3.0 * x + h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta) + y2 = convolve1d(y, h) + xp_assert_close(y2[halflen:-halflen], d2y[halflen:-halflen]) + + +def test_sg_coeffs_deriv(): + # The data in `x` is a sampled parabola, so using savgol_coeffs with an + # order 2 or higher polynomial should give exact results. + i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0]) + x = i ** 2 / 4 + dx = i / 2 + d2x = np.full_like(i, 0.5) + for pos in range(x.size): + coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot') + xp_assert_close(coeffs0.dot(x), x[pos], atol=1e-10) + coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1) + xp_assert_close(coeffs1.dot(x), dx[pos], atol=1e-10) + coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2) + xp_assert_close(coeffs2.dot(x), d2x[pos], atol=1e-10) + + +def test_sg_coeffs_deriv_gt_polyorder(): + """ + If deriv > polyorder, the coefficients should be all 0. + This is a regression test for a bug where, e.g., + savgol_coeffs(5, polyorder=1, deriv=2) + raised an error. + """ + coeffs = savgol_coeffs(5, polyorder=1, deriv=2) + assert_array_equal(coeffs, np.zeros(5)) + coeffs = savgol_coeffs(7, polyorder=4, deriv=6) + assert_array_equal(coeffs, np.zeros(7)) + + +def test_sg_coeffs_large(): + # Test that for large values of window_length and polyorder the array of + # coefficients returned is symmetric. The aim is to ensure that + # no potential numeric overflow occurs. + coeffs0 = savgol_coeffs(31, 9) + assert_array_almost_equal(coeffs0, coeffs0[::-1]) + coeffs1 = savgol_coeffs(31, 9, deriv=1) + assert_array_almost_equal(coeffs1, -coeffs1[::-1]) + +# -------------------------------------------------------------------- +# savgol_coeffs tests for even window length +# -------------------------------------------------------------------- + + +def test_sg_coeffs_even_window_length(): + # Simple case - deriv=0, polyorder=0, 1 + window_lengths = [4, 6, 8, 10, 12, 14, 16] + for length in window_lengths: + h_p_d = savgol_coeffs(length, 0, 0) + xp_assert_close(h_p_d, np.ones_like(h_p_d) / length) + + # Verify with closed forms + # deriv=1, polyorder=1, 2 + def h_p_d_closed_form_1(k, m): + return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1)) + + # deriv=2, polyorder=2 + def h_p_d_closed_form_2(k, m): + numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2) + denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1) + return numer/denom + + for length in window_lengths: + m = length//2 + expected_output = [h_p_d_closed_form_1(k, m) + for k in range(-m + 1, m + 1)][::-1] + actual_output = savgol_coeffs(length, 1, 1) + xp_assert_close(expected_output, actual_output) + actual_output = savgol_coeffs(length, 2, 1) + xp_assert_close(expected_output, actual_output) + + expected_output = [h_p_d_closed_form_2(k, m) + for k in range(-m + 1, m + 1)][::-1] + actual_output = savgol_coeffs(length, 2, 2) + xp_assert_close(expected_output, actual_output) + actual_output = savgol_coeffs(length, 3, 2) + xp_assert_close(expected_output, actual_output) + +#-------------------------------------------------------------------- +# savgol_filter tests +#-------------------------------------------------------------------- + + +def test_sg_filter_trivial(): + """ Test some trivial edge cases for savgol_filter().""" + x = np.array([1.0]) + y = savgol_filter(x, 1, 0) + assert_equal(y, [1.0]) + + # Input is a single value. With a window length of 3 and polyorder 1, + # the value in y is from the straight-line fit of (-1,0), (0,3) and + # (1, 0) at 0. 
This is just the average of the three values, hence 1.0. + x = np.array([3.0]) + y = savgol_filter(x, 3, 1, mode='constant') + assert_almost_equal(y, [1.0], decimal=15) + + x = np.array([3.0]) + y = savgol_filter(x, 3, 1, mode='nearest') + assert_almost_equal(y, [3.0], decimal=15) + + x = np.array([1.0] * 3) + y = savgol_filter(x, 3, 1, mode='wrap') + assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15) + + +def test_sg_filter_basic(): + # Some basic test cases for savgol_filter(). + x = np.array([1.0, 2.0, 1.0]) + y = savgol_filter(x, 3, 1, mode='constant') + xp_assert_close(y, [1.0, 4.0 / 3, 1.0]) + + y = savgol_filter(x, 3, 1, mode='mirror') + xp_assert_close(y, [5.0 / 3, 4.0 / 3, 5.0 / 3]) + + y = savgol_filter(x, 3, 1, mode='wrap') + xp_assert_close(y, [4.0 / 3, 4.0 / 3, 4.0 / 3]) + + +def test_sg_filter_2d(): + x = np.array([[1.0, 2.0, 1.0], + [2.0, 4.0, 2.0]]) + expected = np.array([[1.0, 4.0 / 3, 1.0], + [2.0, 8.0 / 3, 2.0]]) + y = savgol_filter(x, 3, 1, mode='constant') + xp_assert_close(y, expected) + + y = savgol_filter(x.T, 3, 1, mode='constant', axis=0) + xp_assert_close(y, expected.T) + + +def test_sg_filter_interp_edges(): + # Another test with low degree polynomial data, for which we can easily + # give the exact results. In this test, we use mode='interp', so + # savgol_filter should match the exact solution for the entire data set, + # including the edges. + t = np.linspace(-5, 5, 21) + delta = t[1] - t[0] + # Polynomial test data. + x = np.array([t, + 3 * t ** 2, + t ** 3 - t]) + dx = np.array([np.ones_like(t), + 6 * t, + 3 * t ** 2 - 1.0]) + d2x = np.array([np.zeros_like(t), + np.full_like(t, 6), + 6 * t]) + + window_length = 7 + + y = savgol_filter(x, window_length, 3, axis=-1, mode='interp') + xp_assert_close(y, x, atol=1e-12) + + y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', + deriv=1, delta=delta) + xp_assert_close(y1, dx, atol=1e-12) + + y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', + deriv=2, delta=delta) + xp_assert_close(y2, d2x, atol=1e-12) + + # Transpose everything, and test again with axis=0. + + x = x.T + dx = dx.T + d2x = d2x.T + + y = savgol_filter(x, window_length, 3, axis=0, mode='interp') + xp_assert_close(y, x, atol=1e-12) + + y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp', + deriv=1, delta=delta) + xp_assert_close(y1, dx, atol=1e-12) + + y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp', + deriv=2, delta=delta) + xp_assert_close(y2, d2x, atol=1e-12) + + +def test_sg_filter_interp_edges_3d(): + # Test mode='interp' with a 3-D array. 
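+    # The signal components below are polynomials of degree <= 3, so a
+    # polyorder-3 filter with mode='interp' should reproduce them exactly.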
+ t = np.linspace(-5, 5, 21) + delta = t[1] - t[0] + x1 = np.array([t, -t]) + x2 = np.array([t ** 2, 3 * t ** 2 + 5]) + x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t]) + dx1 = np.array([np.ones_like(t), -np.ones_like(t)]) + dx2 = np.array([2 * t, 6 * t]) + dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5]) + + # z has shape (3, 2, 21) + z = np.array([x1, x2, x3]) + dz = np.array([dx1, dx2, dx3]) + + y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta) + xp_assert_close(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta) + xp_assert_close(dy, dz, atol=1e-10) + + # z has shape (3, 21, 2) + z = np.array([x1.T, x2.T, x3.T]) + dz = np.array([dx1.T, dx2.T, dx3.T]) + + y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta) + xp_assert_close(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta) + xp_assert_close(dy, dz, atol=1e-10) + + # z has shape (21, 3, 2) + z = z.swapaxes(0, 1).copy() + dz = dz.swapaxes(0, 1).copy() + + y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta) + xp_assert_close(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta) + xp_assert_close(dy, dz, atol=1e-10) + + +def test_sg_filter_valid_window_length_3d(): + """Tests that the window_length check is using the correct axis.""" + + x = np.ones((10, 20, 30)) + + savgol_filter(x, window_length=29, polyorder=3, mode='interp') + + with pytest.raises(ValueError, match='window_length must be less than'): + # window_length is more than x.shape[-1]. + savgol_filter(x, window_length=31, polyorder=3, mode='interp') + + savgol_filter(x, window_length=9, polyorder=3, axis=0, mode='interp') + + with pytest.raises(ValueError, match='window_length must be less than'): + # window_length is more than x.shape[0]. + savgol_filter(x, window_length=11, polyorder=3, axis=0, mode='interp') diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_short_time_fft.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_short_time_fft.py new file mode 100644 index 0000000000000000000000000000000000000000..6251deea10e6404792a4bd2ef22f137390cb9e0f --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_short_time_fft.py @@ -0,0 +1,872 @@ +"""Unit tests for module `_short_time_fft`. + +This file's structure loosely groups the tests into the following sequential +categories: + +1. Test function `_calc_dual_canonical_window`. +2. Test for invalid parameters and exceptions in `ShortTimeFFT` (until the + `test_from_window` function). +3. Test algorithmic properties of STFT/ISTFT. Some tests were ported from + ``test_spectral.py``. + +Notes +----- +* Mypy 0.990 does interpret the line:: + + from scipy.stats import norm as normal_distribution + + incorrectly (but the code works), hence a ``type: ignore`` was appended. 
+""" +import math +from itertools import product +from typing import cast, get_args, Literal + +import numpy as np +import pytest +from scipy._lib._array_api import xp_assert_close, xp_assert_equal +from scipy.fft import fftshift +from scipy.stats import norm as normal_distribution # type: ignore +from scipy.signal import get_window, welch, stft, istft, spectrogram + +from scipy.signal._short_time_fft import FFT_MODE_TYPE, \ + _calc_dual_canonical_window, ShortTimeFFT, PAD_TYPE +from scipy.signal.windows import gaussian + + +def test__calc_dual_canonical_window_roundtrip(): + """Test dual window calculation with a round trip to verify duality. + + Note that this works only for canonical window pairs (having minimal + energy) like a Gaussian. + + The window is the same as in the example of `from ShortTimeFFT.from_dual`. + """ + win = gaussian(51, std=10, sym=True) + d_win = _calc_dual_canonical_window(win, 10) + win2 = _calc_dual_canonical_window(d_win, 10) + xp_assert_close(win2, win) + + +def test__calc_dual_canonical_window_exceptions(): + """Raise all exceptions in `_calc_dual_canonical_window`.""" + # Verify that calculation can fail: + with pytest.raises(ValueError, match="hop=5 is larger than window len.*"): + _calc_dual_canonical_window(np.ones(4), 5) + with pytest.raises(ValueError, match=".* Transform not invertible!"): + _calc_dual_canonical_window(np.array([.1, .2, .3, 0]), 4) + + # Verify that parameter `win` may not be integers: + with pytest.raises(ValueError, match="Parameter 'win' cannot be of int.*"): + _calc_dual_canonical_window(np.ones(4, dtype=int), 1) + + +def test_invalid_initializer_parameters(): + """Verify that exceptions get raised on invalid parameters when + instantiating ShortTimeFFT. """ + with pytest.raises(ValueError, match=r"Parameter win must be 1d, " + + r"but win.shape=\(2, 2\)!"): + ShortTimeFFT(np.ones((2, 2)), hop=4, fs=1) + with pytest.raises(ValueError, match="Parameter win must have " + + "finite entries"): + ShortTimeFFT(np.array([1, np.inf, 2, 3]), hop=4, fs=1) + with pytest.raises(ValueError, match="Parameter hop=0 is not " + + "an integer >= 1!"): + ShortTimeFFT(np.ones(4), hop=0, fs=1) + with pytest.raises(ValueError, match="Parameter hop=2.0 is not " + + "an integer >= 1!"): + # noinspection PyTypeChecker + ShortTimeFFT(np.ones(4), hop=2.0, fs=1) + with pytest.raises(ValueError, match=r"dual_win.shape=\(5,\) must equal " + + r"win.shape=\(4,\)!"): + ShortTimeFFT(np.ones(4), hop=2, fs=1, dual_win=np.ones(5)) + with pytest.raises(ValueError, match="Parameter dual_win must be " + + "a finite array!"): + ShortTimeFFT(np.ones(3), hop=2, fs=1, + dual_win=np.array([np.nan, 2, 3])) + + +def test_exceptions_properties_methods(): + """Verify that exceptions get raised when setting properties or calling + method of ShortTimeFFT to/with invalid values.""" + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + with pytest.raises(ValueError, match="Sampling interval T=-1 must be " + + "positive!"): + SFT.T = -1 + with pytest.raises(ValueError, match="Sampling frequency fs=-1 must be " + + "positive!"): + SFT.fs = -1 + with pytest.raises(ValueError, match="fft_mode='invalid_typ' not in " + + r"\('twosided', 'centered', " + + r"'onesided', 'onesided2X'\)!"): + SFT.fft_mode = 'invalid_typ' + with pytest.raises(ValueError, match="For scaling is None, " + + "fft_mode='onesided2X' is invalid.*"): + SFT.fft_mode = 'onesided2X' + with pytest.raises(ValueError, match="Attribute mfft=7 needs to be " + + "at least the window length.*"): + SFT.mfft = 7 + with 
pytest.raises(ValueError, match="scaling='invalid' not in.*"): + # noinspection PyTypeChecker + SFT.scale_to('invalid') + with pytest.raises(ValueError, match="phase_shift=3.0 has the unit .*"): + SFT.phase_shift = 3.0 + with pytest.raises(ValueError, match="-mfft < phase_shift < mfft " + + "does not hold.*"): + SFT.phase_shift = 2*SFT.mfft + with pytest.raises(ValueError, match="Parameter padding='invalid' not.*"): + # noinspection PyTypeChecker + g = SFT._x_slices(np.zeros(16), k_off=0, p0=0, p1=1, padding='invalid') + next(g) # execute generator + with pytest.raises(ValueError, match="Trend type must be 'linear' " + + "or 'constant'"): + # noinspection PyTypeChecker + SFT.stft_detrend(np.zeros(16), detr='invalid') + with pytest.raises(ValueError, match="Parameter detr=nan is not a str, " + + "function or None!"): + # noinspection PyTypeChecker + SFT.stft_detrend(np.zeros(16), detr=np.nan) + with pytest.raises(ValueError, match="Invalid Parameter p0=0, p1=200.*"): + SFT.p_range(100, 0, 200) + + with pytest.raises(ValueError, match="f_axis=0 may not be equal to " + + "t_axis=0!"): + SFT.istft(np.zeros((SFT.f_pts, 2)), t_axis=0, f_axis=0) + with pytest.raises(ValueError, match=r"S.shape\[f_axis\]=2 must be equal" + + " to self.f_pts=5.*"): + SFT.istft(np.zeros((2, 2))) + with pytest.raises(ValueError, match=r"S.shape\[t_axis\]=1 needs to have" + + " at least 2 slices.*"): + SFT.istft(np.zeros((SFT.f_pts, 1))) + with pytest.raises(ValueError, match=r".*\(k1=100\) <= \(k_max=12\) " + + "is false!$"): + SFT.istft(np.zeros((SFT.f_pts, 3)), k1=100) + with pytest.raises(ValueError, match=r"\(k1=1\) - \(k0=0\) = 1 has to " + + "be at least.* length 4!"): + SFT.istft(np.zeros((SFT.f_pts, 3)), k0=0, k1=1) + + with pytest.raises(ValueError, match=r"Parameter axes_seq='invalid' " + + r"not in \['tf', 'ft'\]!"): + # noinspection PyTypeChecker + SFT.extent(n=100, axes_seq='invalid') + with pytest.raises(ValueError, match="Attribute fft_mode=twosided must.*"): + SFT.fft_mode = 'twosided' + SFT.extent(n=100) + + +@pytest.mark.parametrize('m', ('onesided', 'onesided2X')) +def test_exceptions_fft_mode_complex_win(m: FFT_MODE_TYPE): + """Verify that one-sided spectra are not allowed with complex-valued + windows or with complex-valued signals. + + The reason being, the `rfft` function only accepts real-valued input. + """ + with pytest.raises(ValueError, + match=f"One-sided spectra, i.e., fft_mode='{m}'.*"): + ShortTimeFFT(np.ones(8)*1j, hop=4, fs=1, fft_mode=m) + + SFT = ShortTimeFFT(np.ones(8)*1j, hop=4, fs=1, fft_mode='twosided') + with pytest.raises(ValueError, + match=f"One-sided spectra, i.e., fft_mode='{m}'.*"): + SFT.fft_mode = m + + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1, scale_to='psd', fft_mode='onesided') + with pytest.raises(ValueError, match="Complex-valued `x` not allowed for self.*"): + SFT.stft(np.ones(8)*1j) + SFT.fft_mode = 'onesided2X' + with pytest.raises(ValueError, match="Complex-valued `x` not allowed for self.*"): + SFT.stft(np.ones(8)*1j) + + +def test_invalid_fft_mode_RuntimeError(): + """Ensure exception gets raised when property `fft_mode` is invalid. 
""" + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + SFT._fft_mode = 'invalid_typ' + + with pytest.raises(RuntimeError): + _ = SFT.f + with pytest.raises(RuntimeError): + SFT._fft_func(np.ones(8)) + with pytest.raises(RuntimeError): + SFT._ifft_func(np.ones(8)) + + +@pytest.mark.parametrize('win_params, Nx', [(('gaussian', 2.), 9), # in docstr + ('triang', 7), + (('kaiser', 4.0), 9), + (('exponential', None, 1.), 9), + (4.0, 9)]) +def test_from_window(win_params, Nx: int): + """Verify that `from_window()` handles parameters correctly. + + The window parameterizations are documented in the `get_window` docstring. + """ + w_sym, fs = get_window(win_params, Nx, fftbins=False), 16. + w_per = get_window(win_params, Nx, fftbins=True) + SFT0 = ShortTimeFFT(w_sym, hop=3, fs=fs, fft_mode='twosided', + scale_to='psd', phase_shift=1) + nperseg = len(w_sym) + noverlap = nperseg - SFT0.hop + SFT1 = ShortTimeFFT.from_window(win_params, fs, nperseg, noverlap, + symmetric_win=True, fft_mode='twosided', + scale_to='psd', phase_shift=1) + # periodic window: + SFT2 = ShortTimeFFT.from_window(win_params, fs, nperseg, noverlap, + symmetric_win=False, fft_mode='twosided', + scale_to='psd', phase_shift=1) + # Be informative when comparing instances: + xp_assert_equal(SFT1.win, SFT0.win) + xp_assert_close(SFT2.win, w_per / np.sqrt(sum(w_per**2) * fs)) + for n_ in ('hop', 'T', 'fft_mode', 'mfft', 'scaling', 'phase_shift'): + v0, v1, v2 = (getattr(SFT_, n_) for SFT_ in (SFT0, SFT1, SFT2)) + assert v1 == v0, f"SFT1.{n_}={v1} does not equal SFT0.{n_}={v0}" + assert v2 == v0, f"SFT2.{n_}={v2} does not equal SFT0.{n_}={v0}" + + +def test_dual_win_roundtrip(): + """Verify the duality of `win` and `dual_win`. + + Note that this test does not work for arbitrary windows, since dual windows + are not unique. It always works for invertible STFTs if the windows do not + overlap. + """ + # Non-standard values for keyword arguments (except for `scale_to`): + kw = dict(hop=4, fs=1, fft_mode='twosided', mfft=8, scale_to=None, + phase_shift=2) + SFT0 = ShortTimeFFT(np.ones(4), **kw) + SFT1 = ShortTimeFFT.from_dual(SFT0.dual_win, **kw) + xp_assert_close(SFT1.dual_win, SFT0.win) + + +@pytest.mark.parametrize('scale_to, fac_psd, fac_mag', + [(None, 0.25, 0.125), + ('magnitude', 2.0, 1), + ('psd', 1, 0.5)]) +def test_scaling(scale_to: Literal['magnitude', 'psd'], fac_psd, fac_mag): + """Verify scaling calculations. + + * Verify passing `scale_to`parameter to ``__init__(). + * Roundtrip while changing scaling factor. 
+ """ + SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=scale_to) + assert SFT.fac_psd == fac_psd + assert SFT.fac_magnitude == fac_mag + # increase coverage by accessing properties twice: + assert SFT.fac_psd == fac_psd + assert SFT.fac_magnitude == fac_mag + + x = np.fft.irfft([0, 0, 7, 0, 0, 0, 0]) # periodic signal + Sx = SFT.stft(x) + Sx_mag, Sx_psd = Sx * SFT.fac_magnitude, Sx * SFT.fac_psd + + SFT.scale_to('magnitude') + x_mag = SFT.istft(Sx_mag, k1=len(x)) + xp_assert_close(x_mag, x) + + SFT.scale_to('psd') + x_psd = SFT.istft(Sx_psd, k1=len(x)) + xp_assert_close(x_psd, x) + + +def test_scale_to(): + """Verify `scale_to()` method.""" + SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=None) + + SFT.scale_to('magnitude') + assert SFT.scaling == 'magnitude' + assert SFT.fac_psd == 2.0 + assert SFT.fac_magnitude == 1 + + SFT.scale_to('psd') + assert SFT.scaling == 'psd' + assert SFT.fac_psd == 1 + assert SFT.fac_magnitude == 0.5 + + SFT.scale_to('psd') # needed for coverage + + for scale, s_fac in zip(('magnitude', 'psd'), (8, 4)): + SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=None) + dual_win = SFT.dual_win.copy() + + SFT.scale_to(cast(Literal['magnitude', 'psd'], scale)) + xp_assert_close(SFT.dual_win, dual_win * s_fac) + + +def test_x_slices_padding(): + """Verify padding. + + The reference arrays were taken from the docstrings of `zero_ext`, + `const_ext`, `odd_ext()`, and `even_ext()` from the _array_tools module. + """ + SFT = ShortTimeFFT(np.ones(5), hop=4, fs=1) + x = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]], dtype=float) + d = {'zeros': [[[0, 0, 1, 2, 3], [0, 0, 0, 1, 4]], + [[3, 4, 5, 0, 0], [4, 9, 16, 0, 0]]], + 'edge': [[[1, 1, 1, 2, 3], [0, 0, 0, 1, 4]], + [[3, 4, 5, 5, 5], [4, 9, 16, 16, 16]]], + 'even': [[[3, 2, 1, 2, 3], [4, 1, 0, 1, 4]], + [[3, 4, 5, 4, 3], [4, 9, 16, 9, 4]]], + 'odd': [[[-1, 0, 1, 2, 3], [-4, -1, 0, 1, 4]], + [[3, 4, 5, 6, 7], [4, 9, 16, 23, 28]]]} + for p_, xx in d.items(): + gen = SFT._x_slices(np.array(x), 0, 0, 2, padding=cast(PAD_TYPE, p_)) + yy = np.array([y_.copy() for y_ in gen]) # due to inplace copying + xx = np.asarray(xx, dtype=np.float64) + xp_assert_equal(yy, xx, err_msg=f"Failed '{p_}' padding.") + + +def test_invertible(): + """Verify `invertible` property. """ + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + assert SFT.invertible + SFT = ShortTimeFFT(np.ones(8), hop=9, fs=1) + assert not SFT.invertible + + +def test_border_values(): + """Ensure that minimum and maximum values of slices are correct.""" + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + assert SFT.p_min == 0 + assert SFT.k_min == -4 + assert SFT.lower_border_end == (4, 1) + assert SFT.lower_border_end == (4, 1) # needed to test caching + assert SFT.p_max(10) == 4 + assert SFT.k_max(10) == 16 + assert SFT.upper_border_begin(10) == (4, 2) + + +def test_border_values_exotic(): + """Ensure that the border calculations are correct for windows with + zeros. """ + w = np.array([0, 0, 0, 0, 0, 0, 0, 1.]) + SFT = ShortTimeFFT(w, hop=1, fs=1) + assert SFT.lower_border_end == (0, 0) + + SFT = ShortTimeFFT(np.flip(w), hop=20, fs=1) + assert SFT.upper_border_begin(4) == (0, 0) + + SFT._hop = -1 # provoke unreachable line + with pytest.raises(RuntimeError): + _ = SFT.k_max(4) + with pytest.raises(RuntimeError): + _ = SFT.k_min + + +def test_t(): + """Verify that the times of the slices are correct. """ + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=2) + assert SFT.T == 1/2 + assert SFT.fs == 2. 
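+    # delta_t is the time increment between slices, i.e. hop * T = 4 / fs: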
+ assert SFT.delta_t == 4 * 1/2 + t_stft = np.arange(0, SFT.p_max(10)) * SFT.delta_t + xp_assert_equal(SFT.t(10), t_stft) + xp_assert_equal(SFT.t(10, 1, 3), t_stft[1:3]) + SFT.T = 1/4 + assert SFT.T == 1/4 + assert SFT.fs == 4 + SFT.fs = 1/8 + assert SFT.fs == 1/8 + assert SFT.T == 8 + + +@pytest.mark.parametrize('fft_mode, f', + [('onesided', [0., 1., 2.]), + ('onesided2X', [0., 1., 2.]), + ('twosided', [0., 1., 2., -2., -1.]), + ('centered', [-2., -1., 0., 1., 2.])]) +def test_f(fft_mode: FFT_MODE_TYPE, f): + """Verify the frequency values property `f`.""" + SFT = ShortTimeFFT(np.ones(5), hop=4, fs=5, fft_mode=fft_mode, + scale_to='psd') + xp_assert_equal(SFT.f, f) + + +@pytest.mark.parametrize('n', [20, 21]) +@pytest.mark.parametrize('m', [5, 6]) +@pytest.mark.parametrize('fft_mode', ['onesided', 'centered']) +def test_extent(n, m, fft_mode: FFT_MODE_TYPE): + """Ensure that the `extent()` method is correct. """ + SFT = ShortTimeFFT(np.ones(m), hop=m, fs=m, fft_mode=fft_mode) + + t0 = SFT.t(n)[0] # first timestamp + t1 = SFT.t(n)[-1] + SFT.delta_t # last timestamp + 1 + t0c, t1c = t0 - SFT.delta_t / 2, t1 - SFT.delta_t / 2 # centered timestamps + + f0 = SFT.f[0] # first frequency + f1 = SFT.f[-1] + SFT.delta_f # last frequency + 1 + f0c, f1c = f0 - SFT.delta_f / 2, f1 - SFT.delta_f / 2 # centered frequencies + + assert SFT.extent(n, 'tf', False) == (t0, t1, f0, f1) + assert SFT.extent(n, 'ft', False) == (f0, f1, t0, t1) + assert SFT.extent(n, 'tf', True) == (t0c, t1c, f0c, f1c) + assert SFT.extent(n, 'ft', True) == (f0c, f1c, t0c, t1c) + + +def test_spectrogram(): + """Verify spectrogram and cross-spectrogram methods. """ + SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) + x, y = np.ones(10), np.arange(10) + X, Y = SFT.stft(x), SFT.stft(y) + xp_assert_close(SFT.spectrogram(x), X.real**2+X.imag**2) + xp_assert_close(SFT.spectrogram(x, y), X * Y.conj()) + + +@pytest.mark.parametrize('n', [8, 9]) +def test_fft_func_roundtrip(n: int): + """Test roundtrip `ifft_func(fft_func(x)) == x` for all permutations of + relevant parameters. 
""" + np.random.seed(2394795) + x0 = np.random.rand(n) + w, h_n = np.ones(n), 4 + + pp = dict( + fft_mode=get_args(FFT_MODE_TYPE), + mfft=[None, n, n+1, n+2], + scaling=[None, 'magnitude', 'psd'], + phase_shift=[None, -n+1, 0, n // 2, n-1]) + for f_typ, mfft, scaling, phase_shift in product(*pp.values()): + if f_typ == 'onesided2X' and scaling is None: + continue # this combination is forbidden + SFT = ShortTimeFFT(w, h_n, fs=n, fft_mode=f_typ, mfft=mfft, + scale_to=scaling, phase_shift=phase_shift) + X0 = SFT._fft_func(x0) + x1 = SFT._ifft_func(X0) + xp_assert_close(x0.astype(x1.dtype), x1, + err_msg="_fft_func() roundtrip failed for " + + f"{f_typ=}, {mfft=}, {scaling=}, {phase_shift=}") + + SFT = ShortTimeFFT(w, h_n, fs=1) + SFT._fft_mode = 'invalid_fft' # type: ignore + with pytest.raises(RuntimeError): + SFT._fft_func(x0) + with pytest.raises(RuntimeError): + SFT._ifft_func(x0) + + +@pytest.mark.parametrize('i', range(19)) +def test_impulse_roundtrip(i): + """Roundtrip for an impulse being at different positions `i`.""" + n = 19 + w, h_n = np.ones(8), 3 + x = np.zeros(n) + x[i] = 1 + + SFT = ShortTimeFFT(w, hop=h_n, fs=1, scale_to=None, phase_shift=None) + Sx = SFT.stft(x) + # test slicing the input signal into two parts: + n_q = SFT.nearest_k_p(n // 2) + Sx0 = SFT.stft(x[:n_q], padding='zeros') + Sx1 = SFT.stft(x[n_q:], padding='zeros') + q0_ub = SFT.upper_border_begin(n_q)[1] - SFT.p_min + q1_le = SFT.lower_border_end[1] - SFT.p_min + xp_assert_close(Sx0[:, :q0_ub], Sx[:, :q0_ub], err_msg=f"{i=}") + xp_assert_close(Sx1[:, q1_le:], Sx[:, q1_le-Sx1.shape[1]:], + err_msg=f"{i=}") + + Sx01 = np.hstack((Sx0[:, :q0_ub], + Sx0[:, q0_ub:] + Sx1[:, :q1_le], + Sx1[:, q1_le:])) + xp_assert_close(Sx, Sx01, atol=1e-8, err_msg=f"{i=}") + + y = SFT.istft(Sx, 0, n) + xp_assert_close(y, x, atol=1e-8, err_msg=f"{i=}") + y0 = SFT.istft(Sx, 0, n//2) + xp_assert_close(x[:n//2], y0, atol=1e-8, err_msg=f"{i=}") + y1 = SFT.istft(Sx, n // 2, n) + xp_assert_close(x[n // 2:], y1, atol=1e-8, err_msg=f"{i=}") + + +@pytest.mark.parametrize('hop', [1, 7, 8]) +def test_asymmetric_window_roundtrip(hop: int): + """An asymmetric window could uncover indexing problems. """ + np.random.seed(23371) + + w = np.arange(16) / 8 # must be of type float + w[len(w)//2:] = 1 + SFT = ShortTimeFFT(w, hop, fs=1) + + x = 10 * np.random.randn(64) + Sx = SFT.stft(x) + x1 = SFT.istft(Sx, k1=len(x)) + xp_assert_close(x1, x1, err_msg="Roundtrip for asymmetric window with " + + f" {hop=} failed!") + + +@pytest.mark.parametrize('m_num', [6, 7]) +def test_minimal_length_signal(m_num): + """Verify that the shortest allowed signal works. """ + SFT = ShortTimeFFT(np.ones(m_num), m_num//2, fs=1) + n = math.ceil(m_num/2) + x = np.ones(n) + Sx = SFT.stft(x) + x1 = SFT.istft(Sx, k1=n) + xp_assert_close(x1, x, err_msg=f"Roundtrip minimal length signal ({n=})" + + f" for {m_num} sample window failed!") + with pytest.raises(ValueError, match=rf"len\(x\)={n-1} must be >= ceil.*"): + SFT.stft(x[:-1]) + with pytest.raises(ValueError, match=rf"S.shape\[t_axis\]={Sx.shape[1]-1}" + f" needs to have at least {Sx.shape[1]} slices"): + SFT.istft(Sx[:, :-1], k1=n) + + +def test_tutorial_stft_sliding_win(): + """Verify example in "Sliding Windows" subsection from the "User Guide". + + In :ref:`tutorial_stft_sliding_win` (file ``signal.rst``) of the + :ref:`user_guide` the behavior the border behavior of + ``ShortTimeFFT(np.ones(6), 2, fs=1)`` with a 50 sample signal is discussed. + This test verifies the presented indexes. 
+ """ + SFT = ShortTimeFFT(np.ones(6), 2, fs=1) + + # Lower border: + assert SFT.m_num_mid == 3, f"Slice middle is not 3 but {SFT.m_num_mid=}" + assert SFT.p_min == -1, f"Lowest slice {SFT.p_min=} is not -1" + assert SFT.k_min == -5, f"Lowest slice sample {SFT.p_min=} is not -5" + k_lb, p_lb = SFT.lower_border_end + assert p_lb == 2, f"First unaffected slice {p_lb=} is not 2" + assert k_lb == 5, f"First unaffected sample {k_lb=} is not 5" + + n = 50 # upper signal border + assert (p_max := SFT.p_max(n)) == 27, f"Last slice {p_max=} must be 27" + assert (k_max := SFT.k_max(n)) == 55, f"Last sample {k_max=} must be 55" + k_ub, p_ub = SFT.upper_border_begin(n) + assert p_ub == 24, f"First upper border slice {p_ub=} must be 24" + assert k_ub == 45, f"First upper border slice {k_ub=} must be 45" + + +def test_tutorial_stft_legacy_stft(): + """Verify STFT example in "Comparison with Legacy Implementation" from the + "User Guide". + + In :ref:`tutorial_stft_legacy_stft` (file ``signal.rst``) of the + :ref:`user_guide` the legacy and the new implementation are compared. + """ + fs, N = 200, 1001 # # 200 Hz sampling rate for 5 s signal + t_z = np.arange(N) / fs # time indexes for signal + z = np.exp(2j*np.pi * 70 * (t_z - 0.2 * t_z ** 2)) # complex-valued chirp + + nperseg, noverlap = 50, 40 + win = ('gaussian', 1e-2 * fs) # Gaussian with 0.01 s standard deviation + + # Legacy STFT: + f0_u, t0, Sz0_u = stft(z, fs, win, nperseg, noverlap, + return_onesided=False, scaling='spectrum') + Sz0 = fftshift(Sz0_u, axes=0) + + # New STFT: + SFT = ShortTimeFFT.from_window(win, fs, nperseg, noverlap, + fft_mode='centered', + scale_to='magnitude', phase_shift=None) + Sz1 = SFT.stft(z) + + xp_assert_close(Sz0, Sz1[:, 2:-1]) + + xp_assert_close((abs(Sz1[:, 1]).min(), abs(Sz1[:, 1]).max()), + (6.925060911593139e-07, 8.00271269218721e-07)) + + t0_r, z0_r = istft(Sz0_u, fs, win, nperseg, noverlap, input_onesided=False, + scaling='spectrum') + z1_r = SFT.istft(Sz1, k1=N) + assert len(z0_r) == N + 9 + xp_assert_close(z0_r[:N], z) + xp_assert_close(z1_r, z) + + # Spectrogram is just the absolute square of th STFT: + xp_assert_close(SFT.spectrogram(z), abs(Sz1) ** 2) + + +def test_tutorial_stft_legacy_spectrogram(): + """Verify spectrogram example in "Comparison with Legacy Implementation" + from the "User Guide". + + In :ref:`tutorial_stft_legacy_stft` (file ``signal.rst``) of the + :ref:`user_guide` the legacy and the new implementation are compared. + """ + fs, N = 200, 1001 # 200 Hz sampling rate for almost 5 s signal + t_z = np.arange(N) / fs # time indexes for signal + z = np.exp(2j*np.pi*70 * (t_z - 0.2*t_z**2)) # complex-valued sweep + + nperseg, noverlap = 50, 40 + win = ('gaussian', 1e-2 * fs) # Gaussian with 0.01 s standard dev. + + # Legacy spectrogram: + f2_u, t2, Sz2_u = spectrogram(z, fs, win, nperseg, noverlap, detrend=None, + return_onesided=False, scaling='spectrum', + mode='complex') + + f2, Sz2 = fftshift(f2_u), fftshift(Sz2_u, axes=0) + + # New STFT: + SFT = ShortTimeFFT.from_window(win, fs, nperseg, noverlap, + fft_mode='centered', scale_to='magnitude', + phase_shift=None) + Sz3 = SFT.stft(z, p0=0, p1=(N-noverlap) // SFT.hop, k_offset=nperseg // 2) + t3 = SFT.t(N, p0=0, p1=(N-noverlap) // SFT.hop, k_offset=nperseg // 2) + + xp_assert_close(t2, t3) + xp_assert_close(f2, SFT.f) + xp_assert_close(Sz2, Sz3) + + +def test_permute_axes(): + """Verify correctness of four-dimensional signal by permuting its + shape. 
""" + n = 25 + SFT = ShortTimeFFT(np.ones(8)/8, hop=3, fs=n) + x0 = np.arange(n, dtype=np.float64) + Sx0 = SFT.stft(x0) + Sx0 = Sx0.reshape((Sx0.shape[0], 1, 1, 1, Sx0.shape[-1])) + SxT = np.moveaxis(Sx0, (0, -1), (-1, 0)) + + atol = 2 * np.finfo(SFT.win.dtype).resolution + for i in range(4): + y = np.reshape(x0, np.roll((n, 1, 1, 1), i)) + Sy = SFT.stft(y, axis=i) + xp_assert_close(Sy, np.moveaxis(Sx0, 0, i)) + + yb0 = SFT.istft(Sy, k1=n, f_axis=i) + xp_assert_close(yb0, y, atol=atol) + # explicit t-axis parameter (for coverage): + yb1 = SFT.istft(Sy, k1=n, f_axis=i, t_axis=Sy.ndim-1) + xp_assert_close(yb1, y, atol=atol) + + SyT = np.moveaxis(Sy, (i, -1), (-1, i)) + xp_assert_close(SyT, np.moveaxis(SxT, 0, i)) + + ybT = SFT.istft(SyT, k1=n, t_axis=i, f_axis=-1) + xp_assert_close(ybT, y, atol=atol) + + +@pytest.mark.parametrize("fft_mode", + ('twosided', 'centered', 'onesided', 'onesided2X')) +def test_roundtrip_multidimensional(fft_mode: FFT_MODE_TYPE): + """Test roundtrip of a multidimensional input signal versus its components. + + This test can uncover potential problems with `fftshift()`. + """ + n = 9 + x = np.arange(4*n*2, dtype=np.float64).reshape(4, n, 2) + SFT = ShortTimeFFT(get_window('hann', 4), hop=2, fs=1, + scale_to='magnitude', fft_mode=fft_mode) + Sx = SFT.stft(x, axis=1) + y = SFT.istft(Sx, k1=n, f_axis=1, t_axis=-1) + xp_assert_close(y, x.astype(y.dtype), err_msg='Multidim. roundtrip failed!') + + for i, j in product(range(x.shape[0]), range(x.shape[2])): + y_ = SFT.istft(Sx[i, :, j, :], k1=n) + xp_assert_close(y_, x[i, :, j].astype(y_.dtype), + err_msg="Multidim. roundtrip for component " + + f"x[{i}, :, {j}] and {fft_mode=} failed!") + +@pytest.mark.parametrize("phase_shift", (0, 4, None)) +def test_roundtrip_two_dimensional(phase_shift: int|None): + """Test roundtrip of a 2 channel input signal with `mfft` set with different + values for `phase_shift` + + Tests for Issue https://github.com/scipy/scipy/issues/21671 + """ + n = 21 + SFT = ShortTimeFFT.from_window('hann', fs=1, nperseg=13, noverlap=7, + mfft=16, phase_shift=phase_shift) + x = np.arange(2*n, dtype=float).reshape(2, n) + Sx = SFT.stft(x) + y = SFT.istft(Sx, k1=n) + xp_assert_close(y, x, atol=2 * np.finfo(SFT.win.dtype).resolution, + err_msg='2-dim. roundtrip failed!') + + +@pytest.mark.parametrize('window, n, nperseg, noverlap', + [('boxcar', 100, 10, 0), # Test no overlap + ('boxcar', 100, 10, 9), # Test high overlap + ('bartlett', 101, 51, 26), # Test odd nperseg + ('hann', 1024, 256, 128), # Test defaults + (('tukey', 0.5), 1152, 256, 64), # Test Tukey + ('hann', 1024, 256, 255), # Test overlapped hann + ('boxcar', 100, 10, 3), # NOLA True, COLA False + ('bartlett', 101, 51, 37), # NOLA True, COLA False + ('hann', 1024, 256, 127), # NOLA True, COLA False + # NOLA True, COLA False: + (('tukey', 0.5), 1152, 256, 14), + ('hann', 1024, 256, 5)]) # NOLA True, COLA False +def test_roundtrip_windows(window, n: int, nperseg: int, noverlap: int): + """Roundtrip test adapted from `test_spectral.TestSTFT`. + + The parameters are taken from the methods test_roundtrip_real(), + test_roundtrip_nola_not_cola(), test_roundtrip_float32(), + test_roundtrip_complex(). 
+ """ + np.random.seed(2394655) + + w = get_window(window, nperseg) + SFT = ShortTimeFFT(w, nperseg - noverlap, fs=1, fft_mode='twosided', + phase_shift=None) + + z = 10 * np.random.randn(n) + 10j * np.random.randn(n) + Sz = SFT.stft(z) + z1 = SFT.istft(Sz, k1=len(z)) + xp_assert_close(z, z1, err_msg="Roundtrip for complex values failed") + + x = 10 * np.random.randn(n) + Sx = SFT.stft(x) + x1 = SFT.istft(Sx, k1=len(z)) + xp_assert_close(x.astype(np.complex128), x1, + err_msg="Roundtrip for float values failed") + + x32 = x.astype(np.float32) + Sx32 = SFT.stft(x32) + x32_1 = SFT.istft(Sx32, k1=len(x32)) + x32_1_r = x32_1.real + xp_assert_close(x32, x32_1_r.astype(np.float32), + err_msg="Roundtrip for 32 Bit float values failed") + xp_assert_close(x32.imag, np.zeros_like(x32.imag), + err_msg="Roundtrip for 32 Bit float values failed") + + +@pytest.mark.parametrize('signal_type', ('real', 'complex')) +def test_roundtrip_complex_window(signal_type): + """Test roundtrip for complex-valued window function + + The purpose of this test is to check if the dual window is calculated + correctly for complex-valued windows. + """ + np.random.seed(1354654) + win = np.exp(2j*np.linspace(0, np.pi, 8)) + SFT = ShortTimeFFT(win, 3, fs=1, fft_mode='twosided') + + z = 10 * np.random.randn(11) + if signal_type == 'complex': + z = z + 2j * z + Sz = SFT.stft(z) + z1 = SFT.istft(Sz, k1=len(z)) + xp_assert_close(z.astype(np.complex128), z1, + err_msg="Roundtrip for complex-valued window failed") + + +def test_average_all_segments(): + """Compare `welch` function with stft mean. + + Ported from `TestSpectrogram.test_average_all_segments` from file + ``test__spectral.py``. + """ + x = np.random.randn(1024) + + fs = 1.0 + window = ('tukey', 0.25) + nperseg, noverlap = 16, 2 + fw, Pw = welch(x, fs, window, nperseg, noverlap) + SFT = ShortTimeFFT.from_window(window, fs, nperseg, noverlap, + fft_mode='onesided2X', scale_to='psd', + phase_shift=None) + # `welch` positions the window differently than the STFT: + P = SFT.spectrogram(x, detr='constant', p0=0, + p1=(len(x)-noverlap)//SFT.hop, k_offset=nperseg//2) + + xp_assert_close(SFT.f, fw) + xp_assert_close(np.mean(P, axis=-1), Pw) + + +@pytest.mark.parametrize('window, N, nperseg, noverlap, mfft', + # from test_roundtrip_padded_FFT: + [('hann', 1024, 256, 128, 512), + ('hann', 1024, 256, 128, 501), + ('boxcar', 100, 10, 0, 33), + (('tukey', 0.5), 1152, 256, 64, 1024), + # from test_roundtrip_padded_signal: + ('boxcar', 101, 10, 0, None), + ('hann', 1000, 256, 128, None), + # from test_roundtrip_boundary_extension: + ('boxcar', 100, 10, 0, None), + ('boxcar', 100, 10, 9, None)]) +@pytest.mark.parametrize('padding', get_args(PAD_TYPE)) +def test_stft_padding_roundtrip(window, N: int, nperseg: int, noverlap: int, + mfft: int, padding): + """Test the parameter 'padding' of `stft` with roundtrips. + + The STFT parametrizations were taken from the methods + `test_roundtrip_padded_FFT`, `test_roundtrip_padded_signal` and + `test_roundtrip_boundary_extension` from class `TestSTFT` in file + ``test_spectral.py``. Note that the ShortTimeFFT does not need the + concept of "boundary extension". 
+ """ + x = normal_distribution.rvs(size=N, random_state=2909) # real signal + z = x * np.exp(1j * np.pi / 4) # complex signal + + SFT = ShortTimeFFT.from_window(window, 1, nperseg, noverlap, + fft_mode='twosided', mfft=mfft) + Sx = SFT.stft(x, padding=padding) + x1 = SFT.istft(Sx, k1=N) + xp_assert_close(x1, x.astype(np.complex128), + err_msg=f"Failed real roundtrip with '{padding}' padding") + + Sz = SFT.stft(z, padding=padding) + z1 = SFT.istft(Sz, k1=N) + xp_assert_close(z1, z, err_msg="Failed complex roundtrip with " + + f" '{padding}' padding") + + +@pytest.mark.parametrize('N_x', (128, 129, 255, 256, 1337)) # signal length +@pytest.mark.parametrize('w_size', (128, 256)) # window length +@pytest.mark.parametrize('t_step', (4, 64)) # SFT time hop +@pytest.mark.parametrize('f_c', (7., 23.)) # frequency of input sine +def test_energy_conservation(N_x: int, w_size: int, t_step: int, f_c: float): + """Test if a `psd`-scaled STFT conserves the L2 norm. + + This test is adapted from MNE-Python [1]_. Besides being battle-tested, + this test has the benefit of using non-standard window including + non-positive values and a 2d input signal. + + Since `ShortTimeFFT` requires the signal length `N_x` to be at least the + window length `w_size`, the parameter `N_x` was changed from + ``(127, 128, 255, 256, 1337)`` to ``(128, 129, 255, 256, 1337)`` to be + more useful. + + .. [1] File ``test_stft.py`` of MNE-Python + https://github.com/mne-tools/mne-python/blob/main/mne/time_frequency/tests/test_stft.py + """ + window = np.sin(np.arange(.5, w_size + .5) / w_size * np.pi) + SFT = ShortTimeFFT(window, t_step, fs=1000, fft_mode='onesided2X', + scale_to='psd') + atol = 2*np.finfo(window.dtype).resolution + N_x = max(N_x, w_size) # minimal sing + # Test with low frequency signal + t = np.arange(N_x).astype(np.float64) + x = np.sin(2 * np.pi * f_c * t * SFT.T) + x = np.array([x, x + 1.]) + X = SFT.stft(x) + xp = SFT.istft(X, k1=N_x) + + max_freq = SFT.f[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))] + + assert X.shape[1] == SFT.f_pts + assert np.all(SFT.f >= 0.) + assert np.abs(max_freq - f_c) < 1. + xp_assert_close(x, xp, atol=atol) + + # check L2-norm squared (i.e., energy) conservation: + E_x = np.sum(x**2, axis=-1) * SFT.T # numerical integration + aX2 = X.real**2 + X.imag.real**2 + E_X = np.sum(np.sum(aX2, axis=-1) * SFT.delta_t, axis=-1) * SFT.delta_f + xp_assert_close(E_X, E_x, atol=atol) + + # Test with random signal + np.random.seed(2392795) + x = np.random.randn(2, N_x) + X = SFT.stft(x) + xp = SFT.istft(X, k1=N_x) + + assert X.shape[1] == SFT.f_pts + assert np.all(SFT.f >= 0.) + assert np.abs(max_freq - f_c) < 1. 
+ xp_assert_close(x, xp, atol=atol) + + # check L2-norm squared (i.e., energy) conservation: + E_x = np.sum(x**2, axis=-1) * SFT.T # numeric integration + aX2 = X.real ** 2 + X.imag.real ** 2 + E_X = np.sum(np.sum(aX2, axis=-1) * SFT.delta_t, axis=-1) * SFT.delta_f + xp_assert_close(E_X, E_x, atol=atol) + + # Try with empty array + x = np.zeros((0, N_x)) + X = SFT.stft(x) + xp = SFT.istft(X, k1=N_x) + assert xp.shape == x.shape diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_signaltools.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_signaltools.py new file mode 100644 index 0000000000000000000000000000000000000000..1c00fe094d592b5d6d500bd222a989798aad3bfa --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_signaltools.py @@ -0,0 +1,3928 @@ +import sys + +from concurrent.futures import ThreadPoolExecutor, as_completed +from decimal import Decimal +from itertools import product +from math import gcd + +import pytest +from pytest import raises as assert_raises +from numpy.testing import ( + assert_equal, + assert_almost_equal, assert_array_equal, assert_array_almost_equal, + assert_allclose, assert_, assert_array_less, + suppress_warnings) +from numpy import array, arange +import numpy as np + +from scipy import fft as sp_fft +from scipy.ndimage import correlate1d +from scipy.optimize import fmin, linear_sum_assignment +from scipy import signal +from scipy.signal import ( + correlate, correlate2d, correlation_lags, convolve, convolve2d, + fftconvolve, oaconvolve, choose_conv_method, envelope, + hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, + invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, + sosfilt_zi, tf2zpk, BadCoefficients, detrend, unique_roots, residue, + residuez) +from scipy.signal.windows import hann +from scipy.signal._signaltools import (_filtfilt_gust, _compute_factors, + _group_poles) +from scipy.signal._upfirdn import _upfirdn_modes +from scipy._lib import _testutils +from scipy._lib._array_api import xp_assert_close +from scipy._lib._util import ComplexWarning, np_long, np_ulong + + +class _TestConvolve: + + def test_basic(self): + a = [3, 4, 5, 6, 5, 4] + b = [1, 2, 3] + c = convolve(a, b) + assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) + + def test_same(self): + a = [3, 4, 5] + b = [1, 2, 3, 4] + c = convolve(a, b, mode="same") + assert_array_equal(c, array([10, 22, 34])) + + def test_same_eq(self): + a = [3, 4, 5] + b = [1, 2, 3] + c = convolve(a, b, mode="same") + assert_array_equal(c, array([10, 22, 22])) + + def test_complex(self): + x = array([1 + 1j, 2 + 1j, 3 + 1j]) + y = array([1 + 1j, 2 + 1j]) + z = convolve(x, y) + assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) + + def test_zero_rank(self): + a = 1289 + b = 4567 + c = convolve(a, b) + assert_equal(c, a * b) + + def test_broadcastable(self): + a = np.arange(27).reshape(3, 3, 3) + b = np.arange(3) + for i in range(3): + b_shape = [1]*3 + b_shape[i] = 3 + x = convolve(a, b.reshape(b_shape), method='direct') + y = convolve(a, b.reshape(b_shape), method='fft') + assert_allclose(x, y) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + c = convolve(a, b) + assert_equal(c, a * b) + + def test_2d_arrays(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve(a, b) + d = array([[2, 7, 16, 17, 12], + [10, 30, 62, 58, 38], + [12, 31, 58, 49, 30]]) + assert_array_equal(c, d) + + def test_input_swapping(self): + 
small = arange(8).reshape(2, 2, 2) + big = 1j * arange(27).reshape(3, 3, 3) + big += arange(27)[::-1].reshape(3, 3, 3) + + out_array = array( + [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], + [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], + [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j], + [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], + + [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], + [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], + [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], + [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]], + + [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], + [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], + [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], + [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], + + [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], + [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], + [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], + [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) + + assert_array_equal(convolve(small, big, 'full'), out_array) + assert_array_equal(convolve(big, small, 'full'), out_array) + assert_array_equal(convolve(small, big, 'same'), + out_array[1:3, 1:3, 1:3]) + assert_array_equal(convolve(big, small, 'same'), + out_array[0:3, 0:3, 0:3]) + assert_array_equal(convolve(small, big, 'valid'), + out_array[1:3, 1:3, 1:3]) + assert_array_equal(convolve(big, small, 'valid'), + out_array[1:3, 1:3, 1:3]) + + def test_invalid_params(self): + a = [3, 4, 5] + b = [1, 2, 3] + assert_raises(ValueError, convolve, a, b, mode='spam') + assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft') + assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') + assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') + assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') + + +class TestConvolve(_TestConvolve): + + def test_valid_mode2(self): + # See gh-5897 + a = [1, 2, 3, 6, 5, 3] + b = [2, 3, 4, 5, 3, 4, 2, 2, 1] + expected = [70, 78, 73, 65] + + out = convolve(a, b, 'valid') + assert_array_equal(out, expected) + + out = convolve(b, a, 'valid') + assert_array_equal(out, expected) + + a = [1 + 5j, 2 - 1j, 3 + 0j] + b = [2 - 3j, 1 + 0j] + expected = [2 - 3j, 8 - 10j] + + out = convolve(a, b, 'valid') + assert_array_equal(out, expected) + + out = convolve(b, a, 'valid') + assert_array_equal(out, expected) + + def test_same_mode(self): + a = [1, 2, 3, 3, 1, 2] + b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] + c = convolve(a, b, 'same') + d = array([57, 61, 63, 57, 45, 36]) + assert_array_equal(c, d) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. 
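+        # "Invalid" means neither array is at least as large as the other along
+        # *every* axis.  For the shapes built below, a is (2, 3) and b is (3, 2):
+        # a is larger along axis 1 but smaller along axis 0, so no fully
+        # overlapping region exists and 'valid' mode is undefined.  A minimal
+        # sketch of the failure (shapes assumed, any values work):
+        #
+        #     convolve(np.ones((2, 3)), np.ones((3, 2)), mode='valid')  # ValueError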
+ a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'}) + + def test_convolve_method(self, n=100): + # this types data structure was manually encoded instead of + # using custom filters on the soon-to-be-removed np.sctypes + types = {'uint16', 'uint64', 'int64', 'int32', + 'complex128', 'float64', 'float16', + 'complex64', 'float32', 'int16', + 'uint8', 'uint32', 'int8', 'bool'} + args = [(t1, t2, mode) for t1 in types for t2 in types + for mode in ['valid', 'full', 'same']] + + # These are random arrays, which means test is much stronger than + # convolving testing by convolving two np.ones arrays + rng = np.random.RandomState(42) + array_types = {'i': rng.choice([0, 1], size=n), + 'f': rng.randn(n)} + array_types['b'] = array_types['u'] = array_types['i'] + array_types['c'] = array_types['f'] + 0.5j*array_types['f'] + + for t1, t2, mode in args: + x1 = array_types[np.dtype(t1).kind].astype(t1) + x2 = array_types[np.dtype(t2).kind].astype(t2) + + results = {key: convolve(x1, x2, method=key, mode=mode) + for key in ['fft', 'direct']} + + assert_equal(results['fft'].dtype, results['direct'].dtype) + + if 'bool' in t1 and 'bool' in t2: + assert_equal(choose_conv_method(x1, x2), 'direct') + continue + + # Found by experiment. Found approx smallest value for (rtol, atol) + # threshold to have tests pass. + if any([t in {'complex64', 'float32'} for t in [t1, t2]]): + kwargs = {'rtol': 1.0e-4, 'atol': 1e-6} + elif 'float16' in [t1, t2]: + # atol is default for np.allclose + kwargs = {'rtol': 1e-3, 'atol': 1e-3} + else: + # defaults for np.allclose (different from assert_allclose) + kwargs = {'rtol': 1e-5, 'atol': 1e-8} + + assert_allclose(results['fft'], results['direct'], **kwargs) + + def test_convolve_method_large_input(self): + # This is really a test that convolving two large integers goes to the + # direct method even if they're in the fft method. 
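+        # Equality is only asserted for the small-n cases because the 'fft' path
+        # computes the convolution in double precision, so for large n its result
+        # can differ from the exact integer 2**(2*n): float64 carries 53
+        # significant bits, and near 2**(2*n) the spacing between representable
+        # values eventually exceeds 1.  A rough sketch of that granularity
+        # (values chosen only for illustration):
+        #
+        #     np.float64(2**100) + 1 == np.float64(2**100)   # True; spacing is 2**48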
+ for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]: + z = np.array([2**n], dtype=np.int64) + fft = convolve(z, z, method='fft') + direct = convolve(z, z, method='direct') + + # this is the case when integer precision gets to us + # issue #6076 has more detail, hopefully more tests after resolved + if n < 50: + assert_equal(fft, direct) + assert_equal(fft, 2**(2*n)) + assert_equal(direct, 2**(2*n)) + + def test_mismatched_dims(self): + # Input arrays should have the same number of dimensions + assert_raises(ValueError, convolve, [1], 2, method='direct') + assert_raises(ValueError, convolve, 1, [2], method='direct') + assert_raises(ValueError, convolve, [1], 2, method='fft') + assert_raises(ValueError, convolve, 1, [2], method='fft') + assert_raises(ValueError, convolve, [1], [[2]]) + assert_raises(ValueError, convolve, [3], 2) + + @pytest.mark.thread_unsafe + def test_dtype_deprecation(self): + # gh-21211 + a = np.asarray([1, 2, 3, 6, 5, 3], dtype=object) + b = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + convolve(a, b) + + +class _TestConvolve2d: + + def test_2d_arrays(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + d = array([[2, 7, 16, 17, 12], + [10, 30, 62, 58, 38], + [12, 31, 58, 49, 30]]) + e = convolve2d(a, b) + assert_array_equal(e, d) + + def test_valid_mode(self): + e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + f = [[1, 2, 3], [3, 4, 5]] + h = array([[62, 80, 98, 116, 134]]) + + g = convolve2d(e, f, 'valid') + assert_array_equal(g, h) + + # See gh-5897 + g = convolve2d(f, e, 'valid') + assert_array_equal(g, h) + + def test_valid_mode_complx(self): + e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j + h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]]) + + g = convolve2d(e, f, 'valid') + assert_array_almost_equal(g, h) + + # See gh-5897 + g = convolve2d(f, e, 'valid') + assert_array_equal(g, h) + + def test_fillvalue(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + fillval = 1 + c = convolve2d(a, b, 'full', 'fill', fillval) + d = array([[24, 26, 31, 34, 32], + [28, 40, 62, 64, 52], + [32, 46, 67, 62, 48]]) + assert_array_equal(c, d) + + def test_fillvalue_errors(self): + msg = "could not cast `fillvalue` directly to the output " + with np.testing.suppress_warnings() as sup: + sup.filter(ComplexWarning, "Casting complex values") + with assert_raises(ValueError, match=msg): + convolve2d([[1]], [[1, 2]], fillvalue=1j) + + msg = "`fillvalue` must be scalar or an array with " + with assert_raises(ValueError, match=msg): + convolve2d([[1]], [[1, 2]], fillvalue=[1, 2]) + + def test_fillvalue_empty(self): + # Check that fillvalue being empty raises an error: + assert_raises(ValueError, convolve2d, [[1]], [[1, 2]], + fillvalue=[]) + + def test_wrap_boundary(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve2d(a, b, 'full', 'wrap') + d = array([[80, 80, 74, 80, 80], + [68, 68, 62, 68, 68], + [80, 80, 74, 80, 80]]) + assert_array_equal(c, d) + + def test_sym_boundary(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve2d(a, b, 'full', 'symm') + d = array([[34, 30, 44, 62, 66], + [52, 48, 62, 80, 84], + [82, 78, 92, 110, 114]]) + assert_array_equal(c, d) + + @pytest.mark.parametrize('func', [convolve2d, correlate2d]) + @pytest.mark.parametrize('boundary, expected', + [('symm', [[37.0, 42.0, 44.0, 45.0]]), + ('wrap', [[43.0, 44.0, 42.0, 
39.0]])]) + def test_same_with_boundary(self, func, boundary, expected): + # Test boundary='symm' and boundary='wrap' with a "long" kernel. + # The size of the kernel requires that the values in the "image" + # be extended more than once to handle the requested boundary method. + # This is a regression test for gh-8684 and gh-8814. + image = np.array([[2.0, -1.0, 3.0, 4.0]]) + kernel = np.ones((1, 21)) + result = func(image, kernel, mode='same', boundary=boundary) + # The expected results were calculated "by hand". Because the + # kernel is all ones, the same result is expected for convolve2d + # and correlate2d. + assert_array_equal(result, expected) + + def test_boundary_extension_same(self): + # Regression test for gh-12686. + # Use ndimage.convolve with appropriate arguments to create the + # expected result. + import scipy.ndimage as ndi + a = np.arange(1, 10*3+1, dtype=float).reshape(10, 3) + b = np.arange(1, 10*10+1, dtype=float).reshape(10, 10) + c = convolve2d(a, b, mode='same', boundary='wrap') + assert_array_equal(c, ndi.convolve(a, b, mode='wrap', origin=(-1, -1))) + + def test_boundary_extension_full(self): + # Regression test for gh-12686. + # Use ndimage.convolve with appropriate arguments to create the + # expected result. + import scipy.ndimage as ndi + a = np.arange(1, 3*3+1, dtype=float).reshape(3, 3) + b = np.arange(1, 6*6+1, dtype=float).reshape(6, 6) + c = convolve2d(a, b, mode='full', boundary='wrap') + apad = np.pad(a, ((3, 3), (3, 3)), 'wrap') + assert_array_equal(c, ndi.convolve(apad, b, mode='wrap')[:-1, :-1]) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) + + +class TestConvolve2d(_TestConvolve2d): + + def test_same_mode(self): + e = [[1, 2, 3], [3, 4, 5]] + f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + g = convolve2d(e, f, 'same') + h = array([[22, 28, 34], + [80, 98, 116]]) + assert_array_equal(g, h) + + def test_valid_mode2(self): + # See gh-5897 + e = [[1, 2, 3], [3, 4, 5]] + f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + expected = [[62, 80, 98, 116, 134]] + + out = convolve2d(e, f, 'valid') + assert_array_equal(out, expected) + + out = convolve2d(f, e, 'valid') + assert_array_equal(out, expected) + + e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] + f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] + expected = [[27 - 1j, 46. 
+ 2j]] + + out = convolve2d(e, f, 'valid') + assert_array_equal(out, expected) + + # See gh-5897 + out = convolve2d(f, e, 'valid') + assert_array_equal(out, expected) + + def test_consistency_convolve_funcs(self): + # Compare np.convolve, signal.convolve, signal.convolve2d + a = np.arange(5) + b = np.array([3.2, 1.4, 3]) + for mode in ['full', 'valid', 'same']: + assert_almost_equal(np.convolve(a, b, mode=mode), + signal.convolve(a, b, mode=mode)) + assert_almost_equal(np.squeeze( + signal.convolve2d([a], [b], mode=mode)), + signal.convolve(a, b, mode=mode)) + + def test_invalid_dims(self): + assert_raises(ValueError, convolve2d, 3, 4) + assert_raises(ValueError, convolve2d, [3], [4]) + assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) + + @pytest.mark.slow + @pytest.mark.xfail_on_32bit("Can't create large array for test") + def test_large_array(self): + # Test indexing doesn't overflow an int (gh-10761) + n = 2**31 // (1000 * np.int64().itemsize) + _testutils.check_free_memory(2 * n * 1001 * np.int64().itemsize / 1e6) + + # Create a chequered pattern of 1s and 0s + a = np.zeros(1001 * n, dtype=np.int64) + a[::2] = 1 + a = np.lib.stride_tricks.as_strided(a, shape=(n, 1000), strides=(8008, 8)) + + count = signal.convolve2d(a, [[1, 1]]) + fails = np.where(count > 1) + assert fails[0].size == 0 + + +class TestFFTConvolve: + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_real(self, axes): + a = array([1, 2, 3]) + expected = array([1, 4, 10, 12, 9.]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_real_axes(self, axes): + a = array([1, 2, 3]) + expected = array([1, 4, 10, 12, 9.]) + + a = np.tile(a, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_complex(self, axes): + a = array([1 + 1j, 2 + 2j, 3 + 3j]) + expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_complex_axes(self, axes): + a = array([1 + 1j, 2 + 2j, 3 + 3j]) + expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) + + a = np.tile(a, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', + None, + [0, 1], + [1, 0], + [0, -1], + [-1, 0], + [-2, 1], + [1, -2], + [-2, -1], + [-1, -2]]) + def test_2d_real_same(self, axes): + a = array([[1, 2, 3], + [4, 5, 6]]) + expected = array([[1, 4, 10, 12, 9], + [8, 26, 56, 54, 36], + [16, 40, 73, 60, 36]]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[1, 2], + [2, 1], + [1, -1], + [-1, 1], + [-2, 2], + [2, -2], + [-2, -1], + [-1, -2]]) + def test_2d_real_same_axes(self, axes): + a = array([[1, 2, 3], + [4, 5, 6]]) + expected = array([[1, 4, 10, 12, 9], + [8, 26, 56, 54, 36], + [16, 40, 73, 60, 36]]) + + a = np.tile(a, [2, 1, 1]) + expected = np.tile(expected, [2, 1, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', + None, + [0, 
1], + [1, 0], + [0, -1], + [-1, 0], + [-2, 1], + [1, -2], + [-2, -1], + [-1, -2]]) + def test_2d_complex_same(self, axes): + a = array([[1 + 2j, 3 + 4j, 5 + 6j], + [2 + 1j, 4 + 3j, 6 + 5j]]) + expected = array([ + [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], + [10j, 44j, 118j, 156j, 122j], + [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] + ]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[1, 2], + [2, 1], + [1, -1], + [-1, 1], + [-2, 2], + [2, -2], + [-2, -1], + [-1, -2]]) + def test_2d_complex_same_axes(self, axes): + a = array([[1 + 2j, 3 + 4j, 5 + 6j], + [2 + 1j, 4 + 3j, 6 + 5j]]) + expected = array([ + [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], + [10j, 44j, 118j, 156j, 122j], + [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] + ]) + + a = np.tile(a, [2, 1, 1]) + expected = np.tile(expected, [2, 1, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_real_same_mode(self, axes): + a = array([1, 2, 3]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected_1 = array([35., 41., 47.]) + expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) + + if axes == '': + out = fftconvolve(a, b, 'same') + else: + out = fftconvolve(a, b, 'same', axes=axes) + assert_array_almost_equal(out, expected_1) + + if axes == '': + out = fftconvolve(b, a, 'same') + else: + out = fftconvolve(b, a, 'same', axes=axes) + assert_array_almost_equal(out, expected_2) + + @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) + def test_real_same_mode_axes(self, axes): + a = array([1, 2, 3]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected_1 = array([35., 41., 47.]) + expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected_1 = np.tile(expected_1, [2, 1]) + expected_2 = np.tile(expected_2, [2, 1]) + + out = fftconvolve(a, b, 'same', axes=axes) + assert_array_almost_equal(out, expected_1) + + out = fftconvolve(b, a, 'same', axes=axes) + assert_array_almost_equal(out, expected_2) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_valid_mode_real(self, axes): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + if axes == '': + out = fftconvolve(a, b, 'valid') + else: + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + if axes == '': + out = fftconvolve(b, a, 'valid') + else: + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1]]) + def test_valid_mode_real_axes(self, axes): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_valid_mode_complex(self, axes): + a = array([3 - 1j, 2 + 7j, 1 + 0j]) + b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) + expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j]) + + if axes == '': + out = fftconvolve(a, b, 'valid') + else: + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + if axes == '': + out = fftconvolve(b, a, 'valid') + else: + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_valid_mode_complex_axes(self, axes): + a = array([3 - 1j, 2 + 7j, 1 + 0j]) + b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) + expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + def test_valid_mode_ignore_nonaxes(self): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [1, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=1) + assert_array_almost_equal(out, expected) + + def test_empty(self): + # Regression test for #1745: crashes with 0-length input. + assert_(fftconvolve([], []).size == 0) + assert_(fftconvolve([5, 6], []).size == 0) + assert_(fftconvolve([], [7]).size == 0) + + def test_zero_rank(self): + a = array(4967) + b = array(3920) + out = fftconvolve(a, b) + assert_equal(out, a * b) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + out = fftconvolve(a, b) + assert_equal(out, a * b) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_random_data(self, axes): + np.random.seed(1234) + a = np.random.rand(1233) + 1j * np.random.rand(1233) + b = np.random.rand(1321) + 1j * np.random.rand(1321) + expected = np.convolve(a, b, 'full') + + if axes == '': + out = fftconvolve(a, b, 'full') + else: + out = fftconvolve(a, b, 'full', axes=axes) + assert_(np.allclose(out, expected, rtol=1e-10)) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_random_data_axes(self, axes): + np.random.seed(1234) + a = np.random.rand(1233) + 1j * np.random.rand(1233) + b = np.random.rand(1321) + 1j * np.random.rand(1321) + expected = np.convolve(a, b, 'full') + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'full', axes=axes) + assert_(np.allclose(out, expected, rtol=1e-10)) + + @pytest.mark.parametrize('axes', [[1, 4], + [4, 1], + [1, -1], + [-1, 1], + [-4, 4], + [4, -4], + [-4, -1], + [-1, -4]]) + def test_random_data_multidim_axes(self, axes): + a_shape, b_shape = (123, 22), (132, 11) + np.random.seed(1234) + a = np.random.rand(*a_shape) + 1j * np.random.rand(*a_shape) + b = np.random.rand(*b_shape) + 1j * np.random.rand(*b_shape) + expected = convolve2d(a, b, 'full') + + a = a[:, :, None, None, None] + b = b[:, :, None, None, None] + expected = expected[:, :, None, None, None] + + a = np.moveaxis(a.swapaxes(0, 2), 1, 4) + b = np.moveaxis(b.swapaxes(0, 2), 1, 4) + expected = np.moveaxis(expected.swapaxes(0, 2), 1, 4) + + # use 1 for dimension 2 in a and 3 in b to test broadcasting + a = np.tile(a, [2, 1, 3, 1, 1]) + b = np.tile(b, [2, 1, 1, 4, 1]) + expected = np.tile(expected, [2, 1, 3, 4, 1]) + + out = fftconvolve(a, b, 'full', axes=axes) + assert_allclose(out, expected, rtol=1e-10, atol=1e-10) + + @pytest.mark.slow + @pytest.mark.parametrize( + 
'n', + list(range(1, 100)) + + list(range(1000, 1500)) + + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) + def test_many_sizes(self, n): + a = np.random.rand(n) + 1j * np.random.rand(n) + b = np.random.rand(n) + 1j * np.random.rand(n) + expected = np.convolve(a, b, 'full') + + out = fftconvolve(a, b, 'full') + assert_allclose(out, expected, atol=1e-10) + + out = fftconvolve(a, b, 'full', axes=[0]) + assert_allclose(out, expected, atol=1e-10) + + @pytest.mark.thread_unsafe + def test_fft_nan(self): + n = 1000 + rng = np.random.default_rng(43876432987) + sig_nan = rng.standard_normal(n) + + for val in [np.nan, np.inf]: + sig_nan[100] = val + coeffs = signal.firwin(200, 0.2) + + msg = "Use of fft convolution.*|invalid value encountered.*" + with pytest.warns(RuntimeWarning, match=msg): + signal.convolve(sig_nan, coeffs, mode='same', method='fft') + +def fftconvolve_err(*args, **kwargs): + raise RuntimeError('Fell back to fftconvolve') + + +def gen_oa_shapes(sizes): + return [(a, b) for a, b in product(sizes, repeat=2) + if abs(a - b) > 3] + + +def gen_oa_shapes_2d(sizes): + shapes0 = gen_oa_shapes(sizes) + shapes1 = gen_oa_shapes(sizes) + shapes = [ishapes0+ishapes1 for ishapes0, ishapes1 in + zip(shapes0, shapes1)] + + modes = ['full', 'valid', 'same'] + return [ishapes+(imode,) for ishapes, imode in product(shapes, modes) + if imode != 'valid' or + (ishapes[0] > ishapes[1] and ishapes[2] > ishapes[3]) or + (ishapes[0] < ishapes[1] and ishapes[2] < ishapes[3])] + + +def gen_oa_shapes_eq(sizes): + return [(a, b) for a, b in product(sizes, repeat=2) + if a >= b] + + +class TestOAConvolve: + @pytest.mark.slow() + @pytest.mark.parametrize('shape_a_0, shape_b_0', + gen_oa_shapes_eq(list(range(100)) + + list(range(100, 1000, 23))) + ) + def test_real_manylens(self, shape_a_0, shape_b_0): + a = np.random.rand(shape_a_0) + b = np.random.rand(shape_b_0) + + expected = fftconvolve(a, b) + out = oaconvolve(a, b) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('shape_a_0, shape_b_0', + gen_oa_shapes([50, 47, 6, 4, 1])) + @pytest.mark.parametrize('is_complex', [True, False]) + @pytest.mark.parametrize('mode', ['full', 'valid', 'same']) + def test_1d_noaxes(self, shape_a_0, shape_b_0, + is_complex, mode, monkeypatch): + a = np.random.rand(shape_a_0) + b = np.random.rand(shape_b_0) + if is_complex: + a = a + 1j*np.random.rand(shape_a_0) + b = b + 1j*np.random.rand(shape_b_0) + + expected = fftconvolve(a, b, mode=mode) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [0, 1]) + @pytest.mark.parametrize('shape_a_0, shape_b_0', + gen_oa_shapes([50, 47, 6, 4])) + @pytest.mark.parametrize('shape_a_extra', [1, 3]) + @pytest.mark.parametrize('shape_b_extra', [1, 3]) + @pytest.mark.parametrize('is_complex', [True, False]) + @pytest.mark.parametrize('mode', ['full', 'valid', 'same']) + def test_1d_axes(self, axes, shape_a_0, shape_b_0, + shape_a_extra, shape_b_extra, + is_complex, mode, monkeypatch): + ax_a = [shape_a_extra]*2 + ax_b = [shape_b_extra]*2 + ax_a[axes] = shape_a_0 + ax_b[axes] = shape_b_0 + + a = np.random.rand(*ax_a) + b = np.random.rand(*ax_b) + if is_complex: + a = a + 1j*np.random.rand(*ax_a) + b = b + 1j*np.random.rand(*ax_b) + + expected = fftconvolve(a, b, mode=mode, axes=axes) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode, axes=axes) + + 
assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('shape_a_0, shape_b_0, ' + 'shape_a_1, shape_b_1, mode', + gen_oa_shapes_2d([50, 47, 6, 4])) + @pytest.mark.parametrize('is_complex', [True, False]) + def test_2d_noaxes(self, shape_a_0, shape_b_0, + shape_a_1, shape_b_1, mode, + is_complex, monkeypatch): + a = np.random.rand(shape_a_0, shape_a_1) + b = np.random.rand(shape_b_0, shape_b_1) + if is_complex: + a = a + 1j*np.random.rand(shape_a_0, shape_a_1) + b = b + 1j*np.random.rand(shape_b_0, shape_b_1) + + expected = fftconvolve(a, b, mode=mode) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[0, 1], [0, 2], [1, 2]]) + @pytest.mark.parametrize('shape_a_0, shape_b_0, ' + 'shape_a_1, shape_b_1, mode', + gen_oa_shapes_2d([50, 47, 6, 4])) + @pytest.mark.parametrize('shape_a_extra', [1, 3]) + @pytest.mark.parametrize('shape_b_extra', [1, 3]) + @pytest.mark.parametrize('is_complex', [True, False]) + def test_2d_axes(self, axes, shape_a_0, shape_b_0, + shape_a_1, shape_b_1, mode, + shape_a_extra, shape_b_extra, + is_complex, monkeypatch): + ax_a = [shape_a_extra]*3 + ax_b = [shape_b_extra]*3 + ax_a[axes[0]] = shape_a_0 + ax_b[axes[0]] = shape_b_0 + ax_a[axes[1]] = shape_a_1 + ax_b[axes[1]] = shape_b_1 + + a = np.random.rand(*ax_a) + b = np.random.rand(*ax_b) + if is_complex: + a = a + 1j*np.random.rand(*ax_a) + b = b + 1j*np.random.rand(*ax_b) + + expected = fftconvolve(a, b, mode=mode, axes=axes) + + monkeypatch.setattr(signal._signaltools, 'fftconvolve', + fftconvolve_err) + out = oaconvolve(a, b, mode=mode, axes=axes) + + assert_array_almost_equal(out, expected) + + def test_empty(self): + # Regression test for #1745: crashes with 0-length input. 
+ assert_(oaconvolve([], []).size == 0) + assert_(oaconvolve([5, 6], []).size == 0) + assert_(oaconvolve([], [7]).size == 0) + + def test_zero_rank(self): + a = array(4967) + b = array(3920) + out = oaconvolve(a, b) + assert_equal(out, a * b) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + out = oaconvolve(a, b) + assert_equal(out, a * b) + + +class TestAllFreqConvolves: + + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_invalid_shapes(self, convapproach): + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + with assert_raises(ValueError, + match="For 'valid' mode, one must be at least " + "as large as the other in every dimension"): + convapproach(a, b, mode='valid') + + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_invalid_shapes_axes(self, convapproach): + a = np.zeros([5, 6, 2, 1]) + b = np.zeros([5, 6, 3, 1]) + with assert_raises(ValueError, + match=r"incompatible shapes for in1 and in2:" + r" \(5L?, 6L?, 2L?, 1L?\) and" + r" \(5L?, 6L?, 3L?, 1L?\)"): + convapproach(a, b, axes=[0, 1]) + + @pytest.mark.parametrize('a,b', + [([1], 2), + (1, [2]), + ([3], [[2]])]) + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_mismatched_dims(self, a, b, convapproach): + with assert_raises(ValueError, + match="in1 and in2 should have the same" + " dimensionality"): + convapproach(a, b) + + @pytest.mark.parametrize('convapproach', + [fftconvolve, oaconvolve]) + def test_invalid_flags(self, convapproach): + with assert_raises(ValueError, + match="acceptable mode flags are 'valid'," + " 'same', or 'full'"): + convapproach([1], [2], mode='chips') + + with assert_raises(ValueError, + match="when provided, axes cannot be empty"): + convapproach([1], [2], axes=[]) + + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + convapproach([1], [2], axes=[[1, 2], [3, 4]]) + + with assert_raises(ValueError, match="axes must be a scalar or " + "iterable of integers"): + convapproach([1], [2], axes=[1., 2., 3., 4.]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + convapproach([1], [2], axes=[1]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + convapproach([1], [2], axes=[-2]) + + with assert_raises(ValueError, + match="all axes must be unique"): + convapproach([1], [2], axes=[0, 0]) + + @pytest.mark.filterwarnings('ignore::DeprecationWarning') + @pytest.mark.parametrize('dtype', [np.longdouble, np.clongdouble]) + def test_longdtype_input(self, dtype): + x = np.random.random((27, 27)).astype(dtype) + y = np.random.random((4, 4)).astype(dtype) + if np.iscomplexobj(dtype()): + x += .1j + y -= .1j + + res = fftconvolve(x, y) + assert_allclose(res, convolve(x, y, method='direct')) + assert res.dtype == dtype + + +class TestMedFilt: + + IN = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], + [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], + [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], + [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], + [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], + [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], + [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], + [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], + [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], + [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] + + OUT = [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], + [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], + [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], + [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], + [50, 50, 
50, 50, 50, 50, 46, 55, 64, 35], + [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], + [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], + [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], + [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], + [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]] + + KERNEL_SIZE = [7,3] + + def test_basic(self): + d = signal.medfilt(self.IN, self.KERNEL_SIZE) + e = signal.medfilt2d(np.array(self.IN, float), self.KERNEL_SIZE) + assert_array_equal(d, self.OUT) + assert_array_equal(d, e) + + @pytest.mark.parametrize('dtype', [np.ubyte, np.byte, np.ushort, np.short, + np_ulong, np_long, np.ulonglong, np.ulonglong, + np.float32, np.float64]) + def test_types(self, dtype): + # volume input and output types match + in_typed = np.array(self.IN, dtype=dtype) + assert_equal(signal.medfilt(in_typed).dtype, dtype) + assert_equal(signal.medfilt2d(in_typed).dtype, dtype) + + @pytest.mark.parametrize('dtype', [np.bool_, np.complex64, np.complex128, + np.clongdouble, np.float16, np.object_, + "float96", "float128"]) + def test_invalid_dtypes(self, dtype): + # We can only test this on platforms that support a native type of float96 or + # float128; comparing to np.longdouble allows us to filter out non-native types + if (dtype in ["float96", "float128"] + and np.finfo(np.longdouble).dtype != dtype): + pytest.skip(f"Platform does not support {dtype}") + + in_typed = np.array(self.IN, dtype=dtype) + with pytest.raises(ValueError, match="not supported"): + signal.medfilt(in_typed) + + with pytest.raises(ValueError, match="not supported"): + signal.medfilt2d(in_typed) + + def test_none(self): + # gh-1651, trac #1124. Ensure this does not segfault. + msg = "dtype=object is not supported by medfilt" + with assert_raises(ValueError, match=msg): + signal.medfilt(None) + + def test_odd_strides(self): + # Avoid a regression with possible contiguous + # numpy arrays that have odd strides. The stride value below gets + # us into wrong memory if used (but it does not need to be used) + dummy = np.arange(10, dtype=np.float64) + a = dummy[5:6] + a.strides = 16 + assert_(signal.medfilt(a, 1) == 5.) + + @pytest.mark.parametrize("dtype", [np.ubyte, np.float32, np.float64]) + def test_medfilt2d_parallel(self, dtype): + in_typed = np.array(self.IN, dtype=dtype) + expected = np.array(self.OUT, dtype=dtype) + + # This is used to simplify the indexing calculations. + assert in_typed.shape == expected.shape + + # We'll do the calculation in four chunks. M1 and N1 are the dimensions + # of the first output chunk. We have to extend the input by half the + # kernel size to be able to calculate the full output chunk. + M1 = expected.shape[0] // 2 + N1 = expected.shape[1] // 2 + offM = self.KERNEL_SIZE[0] // 2 + 1 + offN = self.KERNEL_SIZE[1] // 2 + 1 + + def apply(chunk): + # in = slice of in_typed to use. + # sel = slice of output to crop it to the correct region. + # out = slice of output array to store in. + M, N = chunk + if M == 0: + Min = slice(0, M1 + offM) + Msel = slice(0, -offM) + Mout = slice(0, M1) + else: + Min = slice(M1 - offM, None) + Msel = slice(offM, None) + Mout = slice(M1, None) + if N == 0: + Nin = slice(0, N1 + offN) + Nsel = slice(0, -offN) + Nout = slice(0, N1) + else: + Nin = slice(N1 - offN, None) + Nsel = slice(offN, None) + Nout = slice(N1, None) + + # Do the calculation, but do not write to the output in the threads. + chunk_data = in_typed[Min, Nin] + med = signal.medfilt2d(chunk_data, self.KERNEL_SIZE) + return med[Msel, Nsel], Mout, Nout + + # Give each chunk to a different thread. 
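+        # Concretely, with KERNEL_SIZE == [7, 3] the halo is offM, offN == 4, 2,
+        # so each chunk filters a slightly enlarged input and crops the halo
+        # before writing its quarter of the output.  A minimal sketch for chunk
+        # (0, 0), with the slices taken from the apply() helper above:
+        #
+        #     med = signal.medfilt2d(in_typed[:M1 + 4, :N1 + 2], self.KERNEL_SIZE)
+        #     output[:M1, :N1] = med[:-4, :-2]   # drop rows/cols touched by the edge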
+ output = np.zeros_like(expected) + with ThreadPoolExecutor(max_workers=4) as pool: + chunks = {(0, 0), (0, 1), (1, 0), (1, 1)} + futures = {pool.submit(apply, chunk) for chunk in chunks} + + # Store each result in the output as it arrives. + for future in as_completed(futures): + data, Mslice, Nslice = future.result() + output[Mslice, Nslice] = data + + assert_array_equal(output, expected) + + +class TestWiener: + + def test_basic(self): + g = array([[5, 6, 4, 3], + [3, 5, 6, 2], + [2, 3, 5, 6], + [1, 6, 9, 7]], 'd') + h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], + [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], + [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], + [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) + assert_array_almost_equal(signal.wiener(g), h, decimal=6) + assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) + + +padtype_options = ["mean", "median", "minimum", "maximum", "line"] +padtype_options += _upfirdn_modes + + +class TestResample: + def test_basic(self): + # Some basic tests + + # Regression test for issue #3603. + # window.shape must equal to sig.shape[0] + sig = np.arange(128) + num = 256 + win = signal.get_window(('kaiser', 8.0), 160) + assert_raises(ValueError, signal.resample, sig, num, window=win) + + # Other degenerate conditions + assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) + assert_raises(ValueError, signal.resample_poly, sig, 1, 0) + assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='') + assert_raises(ValueError, signal.resample_poly, sig, 2, 1, + padtype='mean', cval=10) + + # test for issue #6505 - should not modify window.shape when axis ≠ 0 + sig2 = np.tile(np.arange(160), (2, 1)) + signal.resample(sig2, num, axis=-1, window=win) + assert_(win.shape == (160,)) + + @pytest.mark.parametrize('window', (None, 'hamming')) + @pytest.mark.parametrize('N', (20, 19)) + @pytest.mark.parametrize('num', (100, 101, 10, 11)) + def test_rfft(self, N, num, window): + # Make sure the speed up using rfft gives the same result as the normal + # way using fft + x = np.linspace(0, 10, N, endpoint=False) + y = np.cos(-x**2/6.0) + assert_allclose(signal.resample(y, num, window=window), + signal.resample(y + 0j, num, window=window).real) + + y = np.array([np.cos(-x**2/6.0), np.sin(-x**2/6.0)]) + y_complex = y + 0j + assert_allclose( + signal.resample(y, num, axis=1, window=window), + signal.resample(y_complex, num, axis=1, window=window).real, + atol=1e-9) + + def test_input_domain(self): + # Test if both input domain modes produce the same results. 
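+        # domain='freq' tells `resample` that the input already holds Fourier
+        # coefficients, so the forward FFT is skipped; feeding it fft(x) should
+        # therefore agree with the default domain='time' path.  A minimal sketch,
+        # mirroring the comparison below:
+        #
+        #     signal.resample(sp_fft.fft(tsig), num, domain='freq')
+        #     signal.resample(tsig, num, domain='time')   # same result up to fp error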
+ tsig = np.arange(256) + 0j + fsig = sp_fft.fft(tsig) + num = 256 + assert_allclose( + signal.resample(fsig, num, domain='freq'), + signal.resample(tsig, num, domain='time'), + atol=1e-9) + + @pytest.mark.parametrize('nx', (1, 2, 3, 5, 8)) + @pytest.mark.parametrize('ny', (1, 2, 3, 5, 8)) + @pytest.mark.parametrize('dtype', ('float', 'complex')) + def test_dc(self, nx, ny, dtype): + x = np.array([1] * nx, dtype) + y = signal.resample(x, ny) + assert_allclose(y, [1] * ny) + + @pytest.mark.thread_unsafe # due to Cython fused types, see cython#6506 + @pytest.mark.parametrize('padtype', padtype_options) + def test_mutable_window(self, padtype): + # Test that a mutable window is not modified + impulse = np.zeros(3) + window = np.random.RandomState(0).randn(2) + window_orig = window.copy() + signal.resample_poly(impulse, 5, 1, window=window, padtype=padtype) + assert_array_equal(window, window_orig) + + @pytest.mark.parametrize('padtype', padtype_options) + def test_output_float32(self, padtype): + # Test that float32 inputs yield a float32 output + x = np.arange(10, dtype=np.float32) + h = np.array([1, 1, 1], dtype=np.float32) + y = signal.resample_poly(x, 1, 2, window=h, padtype=padtype) + assert y.dtype == np.float32 + + @pytest.mark.parametrize('padtype', padtype_options) + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_output_match_dtype(self, padtype, dtype): + # Test that the dtype of x is preserved per issue #14733 + x = np.arange(10, dtype=dtype) + y = signal.resample_poly(x, 1, 2, padtype=padtype) + assert y.dtype == x.dtype + + @pytest.mark.parametrize( + "method, ext, padtype", + [("fft", False, None)] + + list( + product( + ["polyphase"], [False, True], padtype_options, + ) + ), + ) + def test_resample_methods(self, method, ext, padtype): + # Test resampling of sinusoids and random noise (1-sec) + rate = 100 + rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] + + # Sinusoids, windowed to avoid edge artifacts + t = np.arange(rate) / float(rate) + freqs = np.array((1., 10., 40.))[:, np.newaxis] + x = np.sin(2 * np.pi * freqs * t) * hann(rate) + + for rate_to in rates_to: + t_to = np.arange(rate_to) / float(rate_to) + y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) + if method == 'fft': + y_resamps = signal.resample(x, rate_to, axis=-1) + else: + if ext and rate_to != rate: + # Match default window design + g = gcd(rate_to, rate) + up = rate_to // g + down = rate // g + max_rate = max(up, down) + f_c = 1. / max_rate + half_len = 10 * max_rate + window = signal.firwin(2 * half_len + 1, f_c, + window=('kaiser', 5.0)) + polyargs = {'window': window, 'padtype': padtype} + else: + polyargs = {'padtype': padtype} + + y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1, + **polyargs) + + for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs): + if freq >= 0.5 * rate_to: + y_to.fill(0.) 
# mostly low-passed away + if padtype in ['minimum', 'maximum']: + assert_allclose(y_resamp, y_to, atol=3e-1) + else: + assert_allclose(y_resamp, y_to, atol=1e-3) + else: + assert_array_equal(y_to.shape, y_resamp.shape) + corr = np.corrcoef(y_to, y_resamp)[0, 1] + assert_(corr > 0.99, msg=(corr, rate, rate_to)) + + # Random data + rng = np.random.RandomState(0) + x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind + for rate_to in rates_to: + # random data + t_to = np.arange(rate_to) / float(rate_to) + y_to = np.interp(t_to, t, x) + if method == 'fft': + y_resamp = signal.resample(x, rate_to) + else: + y_resamp = signal.resample_poly(x, rate_to, rate, + padtype=padtype) + assert_array_equal(y_to.shape, y_resamp.shape) + corr = np.corrcoef(y_to, y_resamp)[0, 1] + assert_(corr > 0.99, msg=corr) + + # More tests of fft method (Master 0.18.1 fails these) + if method == 'fft': + x1 = np.array([1.+0.j, 0.+0.j]) + y1_test = signal.resample(x1, 4) + # upsampling a complex array + y1_true = np.array([1.+0.j, 0.5+0.j, 0.+0.j, 0.5+0.j]) + assert_allclose(y1_test, y1_true, atol=1e-12) + x2 = np.array([1., 0.5, 0., 0.5]) + y2_test = signal.resample(x2, 2) # downsampling a real array + y2_true = np.array([1., 0.]) + assert_allclose(y2_test, y2_true, atol=1e-12) + + def test_poly_vs_filtfilt(self): + # Check that up=1.0 gives same answer as filtfilt + slicing + random_state = np.random.RandomState(17) + try_types = (int, np.float32, np.complex64, float, complex) + size = 10000 + down_factors = [2, 11, 79] + + for dtype in try_types: + x = random_state.randn(size).astype(dtype) + if dtype in (np.complex64, np.complex128): + x += 1j * random_state.randn(size) + + # resample_poly assumes zeros outside of signl, whereas filtfilt + # can only constant-pad. Make them equivalent: + x[0] = 0 + x[-1] = 0 + + for down in down_factors: + h = signal.firwin(31, 1. 
/ down, window='hamming') + yf = filtfilt(h, 1.0, x, padtype='constant')[::down] + + # Need to pass convolved version of filter to resample_poly, + # since filtfilt does forward and backward, but resample_poly + # only goes forward + hc = convolve(h, h[::-1]) + y = signal.resample_poly(x, 1, down, window=hc) + assert_allclose(yf, y, atol=1e-7, rtol=1e-7) + + def test_correlate1d(self): + for down in [2, 4]: + for nx in range(1, 40, down): + for nweights in (32, 33): + x = np.random.random((nx,)) + weights = np.random.random((nweights,)) + y_g = correlate1d(x, weights[::-1], mode='constant') + y_s = signal.resample_poly( + x, up=1, down=down, window=weights) + assert_allclose(y_g[::down], y_s) + + @pytest.mark.parametrize('dtype', [np.int32, np.float32]) + def test_gh_15620(self, dtype): + data = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + actual = signal.resample_poly(data, + up=2, + down=1, + padtype='smooth') + assert np.count_nonzero(actual) > 0 + + +class TestCSpline1DEval: + + def test_basic(self): + y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0]) + x = arange(len(y)) + dx = x[1] - x[0] + cj = signal.cspline1d(y) + + x2 = arange(len(y) * 10.0) / 10.0 + y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0]) + + # make sure interpolated values are on knot points + assert_array_almost_equal(y2[::10], y, decimal=5) + + def test_complex(self): + # create some smoothly varying complex signal to interpolate + x = np.arange(2) + y = np.zeros(x.shape, dtype=np.complex64) + T = 10.0 + f = 1.0 / T + y = np.exp(2.0J * np.pi * f * x) + + # get the cspline transform + cy = signal.cspline1d(y) + + # determine new test x value and interpolate + xnew = np.array([0.5]) + ynew = signal.cspline1d_eval(cy, xnew) + + assert_equal(ynew.dtype, y.dtype) + +class TestOrderFilt: + + def test_basic(self): + assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1), + [2, 3, 2]) + + +class _TestLinearFilter: + + def generate(self, shape): + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + return self.convert_dtype(x) + + def convert_dtype(self, arr): + if self.dtype == np.dtype('O'): + arr = np.asarray(arr) + out = np.empty(arr.shape, self.dtype) + iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'], + [['readonly'],['writeonly']]) + for x, y in iter: + y[...] 
= self.type(x[()]) + return out + else: + return np.asarray(arr, dtype=self.dtype) + + def test_rank_1_IIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, -0.5]) + y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) + assert_array_almost_equal(lfilter(b, a, x), y_r) + + def test_rank_1_FIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 1]) + a = self.convert_dtype([1]) + y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) + assert_array_almost_equal(lfilter(b, a, x), y_r) + + def test_rank_1_IIR_init_cond(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([0.5, -0.5]) + zi = self.convert_dtype([1, 2]) + y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) + zf_r = self.convert_dtype([13, -10]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_1_FIR_init_cond(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 1, 1]) + a = self.convert_dtype([1]) + zi = self.convert_dtype([1, 1]) + y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) + zf_r = self.convert_dtype([9, 5]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_2_IIR_axis_0(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], + [6, 4, 2]]) + y = lfilter(b, a, x, axis=0) + assert_array_almost_equal(y_r2_a0, y) + + def test_rank_2_IIR_axis_1(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], + [18, -16, 18]]) + y = lfilter(b, a, x, axis=1) + assert_array_almost_equal(y_r2_a1, y) + + def test_rank_2_IIR_axis_0_init_cond(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + zi = self.convert_dtype(np.ones((4,1))) + + y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], + [19, -17, 19]]) + zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] + y, zf = lfilter(b, a, x, axis=1, zi=zi) + assert_array_almost_equal(y_r2_a0_1, y) + assert_array_almost_equal(zf, zf_r) + + def test_rank_2_IIR_axis_1_init_cond(self): + x = self.generate((4,3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + zi = self.convert_dtype(np.ones((1,3))) + + y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], + [1, 3, 5], [5, 3, 1]]) + zf_r = self.convert_dtype([[-23, -23, -23]]) + y, zf = lfilter(b, a, x, axis=0, zi=zi) + assert_array_almost_equal(y_r2_a0_0, y) + assert_array_almost_equal(zf, zf_r) + + def test_rank_3_IIR(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + + for axis in range(x.ndim): + y = lfilter(b, a, x, axis) + y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) + assert_array_almost_equal(y, y_r) + + def test_rank_3_IIR_init_cond(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + + for axis in range(x.ndim): + zi_shape = list(x.shape) + zi_shape[axis] = 1 + zi = self.convert_dtype(np.ones(zi_shape)) + zi1 = self.convert_dtype([1]) + y, zf = lfilter(b, a, x, axis, zi) + def lf0(w): + return lfilter(b, a, w, zi=zi1)[0] + def lf1(w): + return lfilter(b, a, w, zi=zi1)[1] + y_r = np.apply_along_axis(lf0, axis, x) + zf_r = 
np.apply_along_axis(lf1, axis, x) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_3_FIR(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + + for axis in range(x.ndim): + y = lfilter(b, a, x, axis) + y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) + assert_array_almost_equal(y, y_r) + + def test_rank_3_FIR_init_cond(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + + for axis in range(x.ndim): + zi_shape = list(x.shape) + zi_shape[axis] = 2 + zi = self.convert_dtype(np.ones(zi_shape)) + zi1 = self.convert_dtype([1, 1]) + y, zf = lfilter(b, a, x, axis, zi) + def lf0(w): + return lfilter(b, a, w, zi=zi1)[0] + def lf1(w): + return lfilter(b, a, w, zi=zi1)[1] + y_r = np.apply_along_axis(lf0, axis, x) + zf_r = np.apply_along_axis(lf1, axis, x) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_zi_pseudobroadcast(self): + x = self.generate((4, 5, 20)) + b,a = signal.butter(8, 0.2, output='ba') + b = self.convert_dtype(b) + a = self.convert_dtype(a) + zi_size = b.shape[0] - 1 + + # lfilter requires x.ndim == zi.ndim exactly. However, zi can have + # length 1 dimensions. + zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) + zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) + + y_full, zf_full = lfilter(b, a, x, zi=zi_full) + y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) + + assert_array_almost_equal(y_sing, y_full) + assert_array_almost_equal(zf_full, zf_sing) + + # lfilter does not prepend ones + assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) + + def test_scalar_a(self): + # a can be a scalar. + x = self.generate(6) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) + + y = lfilter(b, a[0], x) + assert_array_almost_equal(y, y_r) + + def test_zi_some_singleton_dims(self): + # lfilter doesn't really broadcast (no prepending of 1's). But does + # do singleton expansion if x and zi have the same ndim. This was + # broken only if a subset of the axes were singletons (gh-4681). 
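+        # i.e. zi must have the same number of dimensions as x, but any length-1
+        # axis of zi is expanded to x's length on that axis.  With the arrays
+        # built below (shapes taken from the code): x is (3, 2, 5) and zi is
+        # (3, 1, 4), so zi's middle axis is expanded to 2 while the trailing axis
+        # stays at max(len(a), len(b)) - 1 == 4:
+        #
+        #     y, zf = lfilter(b, a, x, -1, zi)   # works; no leading axes are prepended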
+ x = self.convert_dtype(np.zeros((3,2,5), 'l')) + b = self.convert_dtype(np.ones(5, 'l')) + a = self.convert_dtype(np.array([1,0,0])) + zi = np.ones((3,1,4), 'l') + zi[1,:,:] *= 2 + zi[2,:,:] *= 3 + zi = self.convert_dtype(zi) + + zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) + y_expected = np.zeros((3,2,5), 'l') + y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] + y_expected = self.convert_dtype(y_expected) + + # IIR + y_iir, zf_iir = lfilter(b, a, x, -1, zi) + assert_array_almost_equal(y_iir, y_expected) + assert_array_almost_equal(zf_iir, zf_expected) + + # FIR + y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) + assert_array_almost_equal(y_fir, y_expected) + assert_array_almost_equal(zf_fir, zf_expected) + + def base_bad_size_zi(self, b, a, x, axis, zi): + b = self.convert_dtype(b) + a = self.convert_dtype(a) + x = self.convert_dtype(x) + zi = self.convert_dtype(zi) + assert_raises(ValueError, lfilter, b, a, x, axis, zi) + + def test_bad_size_zi(self): + # rank 1 + x1 = np.arange(6) + self.base_bad_size_zi([1], [1], x1, -1, [1]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) + self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) + + # rank 2 + x2 = np.arange(12).reshape((4,3)) + # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) + self.base_bad_size_zi([1], [1], x2, 0, [0]) + + # for each of these there are 5 cases tested (in this order): + # 1. not deep enough, right # elements + # 2. too deep, right # elements + # 3. right depth, right # elements, transposed + # 4. right depth, too few elements + # 5. 
right depth, too many elements + + self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) + + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) + + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) + self.base_bad_size_zi([1], [1], x2, 1, [0]) + + self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]]) + + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) + + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + def test_empty_zi(self): + # Regression test for #880: empty array for zi crashes. 
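+        # Sketch (editorial addition, not part of the original tests): with
+        # len(b) == len(a) == 1 the filter keeps max(len(a), len(b)) - 1 == 0
+        # state values, so a valid `zi` is empty and the returned `zf` is empty:
+        #     y, zf = lfilter([1.0], [1.0], x, zi=np.zeros(0))   # zf.size == 0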
+ x = self.generate((5,)) + a = self.convert_dtype([1]) + b = self.convert_dtype([1]) + zi = self.convert_dtype([]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, x) + assert_equal(zf.dtype, self.dtype) + assert_equal(zf.size, 0) + + def test_lfiltic_bad_zi(self): + # Regression test for #3699: bad initial conditions + a = self.convert_dtype([1]) + b = self.convert_dtype([1]) + # "y" sets the datatype of zi, so it truncates if int + zi = lfiltic(b, a, [1., 0]) + zi_1 = lfiltic(b, a, [1, 0]) + zi_2 = lfiltic(b, a, [True, False]) + assert_array_equal(zi, zi_1) + assert_array_equal(zi, zi_2) + + def test_short_x_FIR(self): + # regression test for #5116 + # x shorter than b, with non None zi fails + a = self.convert_dtype([1]) + b = self.convert_dtype([1, 0, -1]) + zi = self.convert_dtype([2, 7]) + x = self.convert_dtype([72]) + ye = self.convert_dtype([74]) + zfe = self.convert_dtype([7, -72]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, ye) + assert_array_almost_equal(zf, zfe) + + def test_short_x_IIR(self): + # regression test for #5116 + # x shorter than b, with non None zi fails + a = self.convert_dtype([1, 1]) + b = self.convert_dtype([1, 0, -1]) + zi = self.convert_dtype([2, 7]) + x = self.convert_dtype([72]) + ye = self.convert_dtype([74]) + zfe = self.convert_dtype([-67, -72]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, ye) + assert_array_almost_equal(zf, zfe) + + def test_do_not_modify_a_b_IIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, -1]) + b0 = b.copy() + a = self.convert_dtype([0.5, -0.5]) + a0 = a.copy() + y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) + y_f = lfilter(b, a, x) + assert_array_almost_equal(y_f, y_r) + assert_equal(b, b0) + assert_equal(a, a0) + + def test_do_not_modify_a_b_FIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 0, 1]) + b0 = b.copy() + a = self.convert_dtype([2]) + a0 = a.copy() + y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) + y_f = lfilter(b, a, x) + assert_array_almost_equal(y_f, y_r) + assert_equal(b, b0) + assert_equal(a, a0) + + @pytest.mark.parametrize("a", [1.0, [1.0], np.array(1.0)]) + @pytest.mark.parametrize("b", [1.0, [1.0], np.array(1.0)]) + def test_scalar_input(self, a, b): + data = np.random.randn(10) + assert_allclose( + lfilter(np.array([1.0]), np.array([1.0]), data), + lfilter(b, a, data)) + + @pytest.mark.thread_unsafe + def test_dtype_deprecation(self): + # gh-21211 + a = np.asarray([1, 2, 3, 6, 5, 3], dtype=object) + b = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + lfilter(a, b, [1, 2, 3, 4]) + + +class TestLinearFilterFloat32(_TestLinearFilter): + dtype = np.dtype('f') + + +class TestLinearFilterFloat64(_TestLinearFilter): + dtype = np.dtype('d') + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class TestLinearFilterFloatExtended(_TestLinearFilter): + dtype = np.dtype('g') + + +class TestLinearFilterComplex64(_TestLinearFilter): + dtype = np.dtype('F') + + +class TestLinearFilterComplex128(_TestLinearFilter): + dtype = np.dtype('D') + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class TestLinearFilterComplexExtended(_TestLinearFilter): + dtype = np.dtype('G') + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class TestLinearFilterDecimal(_TestLinearFilter): + dtype = np.dtype('O') + + def type(self, x): + return Decimal(str(x)) + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +class 
TestLinearFilterObject(_TestLinearFilter): + dtype = np.dtype('O') + type = float + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +def test_lfilter_bad_object(): + # lfilter: object arrays with non-numeric objects raise TypeError. + # Regression test for ticket #1452. + if hasattr(sys, 'abiflags') and 'd' in sys.abiflags: + pytest.skip('test is flaky when run with python3-dbg') + assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0]) + assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0]) + assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0]) + + +_pmf = pytest.mark.filterwarnings('ignore::DeprecationWarning') + +def test_lfilter_notimplemented_input(): + # Should not crash, gh-7991 + assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5]) + + +@pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short, + np_ulong, np_long, np.ulonglong, np.ulonglong, + np.float32, np.float64, + pytest.param(np.longdouble, marks=_pmf), + pytest.param(Decimal, marks=_pmf)] +) +class TestCorrelateReal: + def _setup_rank1(self, dt): + a = np.linspace(0, 3, 4).astype(dt) + b = np.linspace(1, 2, 2).astype(dt) + + y_r = np.array([0, 2, 5, 8, 3]).astype(dt) + return a, b, y_r + + def equal_tolerance(self, res_dt): + # default value of keyword + decimal = 6 + try: + dt_info = np.finfo(res_dt) + if hasattr(dt_info, 'resolution'): + decimal = int(-0.5*np.log10(dt_info.resolution)) + except Exception: + pass + return decimal + + def equal_tolerance_fft(self, res_dt): + # FFT implementations convert longdouble arguments down to + # double so don't expect better precision, see gh-9520 + if res_dt == np.longdouble: + return self.equal_tolerance(np.float64) + else: + return self.equal_tolerance(res_dt) + + def test_method(self, dt): + if dt == Decimal: + method = choose_conv_method([Decimal(4)], [Decimal(3)]) + assert_equal(method, 'direct') + else: + a, b, y_r = self._setup_rank3(dt) + y_fft = correlate(a, b, method='fft') + y_direct = correlate(a, b, method='direct') + + assert_array_almost_equal(y_r, + y_fft, + decimal=self.equal_tolerance_fft(y_fft.dtype),) + assert_array_almost_equal(y_r, + y_direct, + decimal=self.equal_tolerance(y_direct.dtype),) + assert_equal(y_fft.dtype, dt) + assert_equal(y_direct.dtype, dt) + + def test_rank1_valid(self, dt): + a, b, y_r = self._setup_rank1(dt) + y = correlate(a, b, 'valid') + assert_array_almost_equal(y, y_r[1:4]) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = correlate(b, a, 'valid') + assert_array_almost_equal(y, y_r[1:4][::-1]) + assert_equal(y.dtype, dt) + + def test_rank1_same(self, dt): + a, b, y_r = self._setup_rank1(dt) + y = correlate(a, b, 'same') + assert_array_almost_equal(y, y_r[:-1]) + assert_equal(y.dtype, dt) + + def test_rank1_full(self, dt): + a, b, y_r = self._setup_rank1(dt) + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r) + assert_equal(y.dtype, dt) + + def _setup_rank3(self, dt): + a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype( + dt) + b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype( + dt) + + y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.], + [46., 432., 1062., 1840., 2672., 1698., 864., 266.], + [134., 736., 1662., 2768., 3920., 2418., 1168., 314.], + [260., 952., 1932., 3056., 4208., 2580., 1240., 332.], + [202., 664., 1290., 1984., 2688., 1590., 712., 150.], + [114., 344., 642., 960., 1280., 726., 296., 38.]], + + [[23., 400., 1035., 1832., 2696., 1737., 904., 293.], + [134., 920., 2166., 3680., 5280., 
3306., 1640., 474.], + [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.], + [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.], + [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.], + [241., 700., 1281., 1888., 2496., 1383., 532., 39.]], + + [[22., 214., 528., 916., 1332., 846., 430., 132.], + [86., 484., 1098., 1832., 2600., 1602., 772., 206.], + [188., 802., 1698., 2732., 3788., 2256., 1018., 218.], + [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.], + [230., 692., 1290., 1928., 2568., 1458., 596., 78.], + [126., 354., 636., 924., 1212., 654., 234., 0.]]], + dtype=np.float64).astype(dt) + + return a, b, y_r + + def test_rank3_valid(self, dt): + a, b, y_r = self._setup_rank3(dt) + y = correlate(a, b, "valid") + assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5]) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = correlate(b, a, "valid") + assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1]) + assert_equal(y.dtype, dt) + + def test_rank3_same(self, dt): + a, b, y_r = self._setup_rank3(dt) + y = correlate(a, b, "same") + assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2]) + assert_equal(y.dtype, dt) + + def test_rank3_all(self, dt): + a, b, y_r = self._setup_rank3(dt) + y = correlate(a, b) + assert_array_almost_equal(y, y_r) + assert_equal(y.dtype, dt) + + +class TestCorrelate: + # Tests that don't depend on dtype + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'}) + + def test_invalid_params(self): + a = [3, 4, 5] + b = [1, 2, 3] + assert_raises(ValueError, correlate, a, b, mode='spam') + assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft') + assert_raises(ValueError, correlate, a, b, mode='ham', method='direct') + assert_raises(ValueError, correlate, a, b, mode='full', method='bacon') + assert_raises(ValueError, correlate, a, b, mode='same', method='bacon') + + def test_mismatched_dims(self): + # Input arrays should have the same number of dimensions + assert_raises(ValueError, correlate, [1], 2, method='direct') + assert_raises(ValueError, correlate, 1, [2], method='direct') + assert_raises(ValueError, correlate, [1], 2, method='fft') + assert_raises(ValueError, correlate, 1, [2], method='fft') + assert_raises(ValueError, correlate, [1], [[2]]) + assert_raises(ValueError, correlate, [3], 2) + + def test_numpy_fastpath(self): + a = [1, 2, 3] + b = [4, 5] + assert_allclose(correlate(a, b, mode='same'), [5, 14, 23]) + + a = [1, 2, 3] + b = [4, 5, 6] + assert_allclose(correlate(a, b, mode='same'), [17, 32, 23]) + assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12]) + assert_allclose(correlate(a, b, mode='valid'), [32]) + + @pytest.mark.thread_unsafe + def test_dtype_deprecation(self): + # gh-21211 + a = np.asarray([1, 2, 3, 6, 5, 3], dtype=object) + b = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + correlate(a, b) + + +@pytest.mark.parametrize("mode", ["valid", "same", "full"]) +@pytest.mark.parametrize("behind", [True, False]) +@pytest.mark.parametrize("input_size", [100, 101, 1000, 1001, 10000, 10001]) +def test_correlation_lags(mode, behind, input_size): + # 
generate random data + rng = np.random.RandomState(0) + in1 = rng.standard_normal(input_size) + offset = int(input_size/10) + # generate offset version of array to correlate with + if behind: + # y is behind x + in2 = np.concatenate([rng.standard_normal(offset), in1]) + expected = -offset + else: + # y is ahead of x + in2 = in1[offset:] + expected = offset + # cross correlate, returning lag information + correlation = correlate(in1, in2, mode=mode) + lags = correlation_lags(in1.size, in2.size, mode=mode) + # identify the peak + lag_index = np.argmax(correlation) + # Check as expected + assert_equal(lags[lag_index], expected) + # Correlation and lags shape should match + assert_equal(lags.shape, correlation.shape) + + +def test_correlation_lags_invalid_mode(): + with pytest.raises(ValueError, match="Mode asdfgh is invalid"): + correlation_lags(100, 100, mode="asdfgh") + + +@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, + pytest.param(np.clongdouble, marks=_pmf)]) +class TestCorrelateComplex: + # The decimal precision to be used for comparing results. + # This value will be passed as the 'decimal' keyword argument of + # assert_array_almost_equal(). + # Since correlate may chose to use FFT method which converts + # longdoubles to doubles internally don't expect better precision + # for longdouble than for double (see gh-9520). + + def decimal(self, dt): + if dt == np.clongdouble: + dt = np.cdouble + return int(2 * np.finfo(dt).precision / 3) + + def _setup_rank1(self, dt, mode): + np.random.seed(9) + a = np.random.randn(10).astype(dt) + a += 1j * np.random.randn(10).astype(dt) + b = np.random.randn(8).astype(dt) + b += 1j * np.random.randn(8).astype(dt) + + y_r = (correlate(a.real, b.real, mode=mode) + + correlate(a.imag, b.imag, mode=mode)).astype(dt) + y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + + correlate(a.imag, b.real, mode=mode)) + return a, b, y_r + + def test_rank1_valid(self, dt): + a, b, y_r = self._setup_rank1(dt, 'valid') + y = correlate(a, b, 'valid') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = correlate(b, a, 'valid') + assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_rank1_same(self, dt): + a, b, y_r = self._setup_rank1(dt, 'same') + y = correlate(a, b, 'same') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_rank1_full(self, dt): + a, b, y_r = self._setup_rank1(dt, 'full') + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_swap_full(self, dt): + d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) + k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) + y = correlate(d, k) + assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) + + def test_swap_same(self, dt): + d = [0.+0.j, 1.+1.j, 2.+2.j] + k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] + y = correlate(d, k, mode="same") + assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) + + def test_rank3(self, dt): + a = np.random.randn(10, 8, 6).astype(dt) + a += 1j * np.random.randn(10, 8, 6).astype(dt) + b = np.random.randn(8, 6, 4).astype(dt) + b += 1j * np.random.randn(8, 6, 4).astype(dt) + + y_r = (correlate(a.real, b.real) + + correlate(a.imag, b.imag)).astype(dt) + y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) + + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) + 
assert_equal(y.dtype, dt) + + def test_rank0(self, dt): + a = np.array(np.random.randn()).astype(dt) + a += 1j * np.array(np.random.randn()).astype(dt) + b = np.array(np.random.randn()).astype(dt) + b += 1j * np.array(np.random.randn()).astype(dt) + + y_r = (correlate(a.real, b.real) + + correlate(a.imag, b.imag)).astype(dt) + y_r += 1j * np.array(-correlate(a.real, b.imag) + + correlate(a.imag, b.real)) + + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) + assert_equal(y.dtype, dt) + + assert_equal(correlate([1], [2j]), correlate(1, 2j)) + assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) + assert_equal(correlate([3j], [4]), correlate(3j, 4)) + + +class TestCorrelate2d: + + def test_consistency_correlate_funcs(self): + # Compare np.correlate, signal.correlate, signal.correlate2d + a = np.arange(5) + b = np.array([3.2, 1.4, 3]) + for mode in ['full', 'valid', 'same']: + assert_almost_equal(np.correlate(a, b, mode=mode), + signal.correlate(a, b, mode=mode)) + assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], + mode=mode)), + signal.correlate(a, b, mode=mode)) + + # See gh-5897 + if mode == 'valid': + assert_almost_equal(np.correlate(b, a, mode=mode), + signal.correlate(b, a, mode=mode)) + assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], + mode=mode)), + signal.correlate(b, a, mode=mode)) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) + + def test_complex_input(self): + assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) + assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) + assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) + + +class TestLFilterZI: + + def test_basic(self): + a = np.array([1.0, -1.0, 0.5]) + b = np.array([1.0, 0.0, 2.0]) + zi_expected = np.array([5.0, -1.0]) + zi = lfilter_zi(b, a) + assert_array_almost_equal(zi, zi_expected) + + def test_scale_invariance(self): + # Regression test. There was a bug in which b was not correctly + # rescaled when a[0] was nonzero. + b = np.array([2, 8, 5]) + a = np.array([1, 1, 8]) + zi1 = lfilter_zi(b, a) + zi2 = lfilter_zi(2*b, 2*a) + assert_allclose(zi2, zi1, rtol=1e-12) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_types(self, dtype): + b = np.zeros((8), dtype=dtype) + a = np.array([1], dtype=dtype) + assert_equal(np.real(signal.lfilter_zi(b, a)).dtype, dtype) + + +class TestFiltFilt: + filtfilt_kind = 'tf' + + def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, + method='pad', irlen=None): + if self.filtfilt_kind == 'tf': + b, a = zpk2tf(*zpk) + return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) + elif self.filtfilt_kind == 'sos': + sos = zpk2sos(*zpk) + return sosfiltfilt(sos, x, axis, padtype, padlen) + + def test_basic(self): + zpk = tf2zpk([1, 2, 3], [1, 2, 3]) + out = self.filtfilt(zpk, np.arange(12)) + assert_allclose(out, arange(12), atol=5.28e-11) + + def test_sine(self): + rate = 2000 + t = np.linspace(0, 1.0, rate + 1) + # A signal with low frequency and a high frequency. 
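+        # Illustrative sketch (editorial addition, not part of the original
+        # tests): filtfilt runs the filter forward and then backward, so the
+        # retained low-frequency component comes out with no phase lag, unlike
+        # a single lfilter pass:
+        #     b, a = butter(8, 0.125)
+        #     y_zero_phase = filtfilt(b, a, x)   # aligned with xlow
+        #     y_causal = lfilter(b, a, x)        # delayed / phase-shifted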
+ xlow = np.sin(5 * 2 * np.pi * t) + xhigh = np.sin(250 * 2 * np.pi * t) + x = xlow + xhigh + + zpk = butter(8, 0.125, output='zpk') + # r is the magnitude of the largest pole. + r = np.abs(zpk[1]).max() + eps = 1e-5 + # n estimates the number of steps for the + # transient to decay by a factor of eps. + n = int(np.ceil(np.log(eps) / np.log(r))) + + # High order lowpass filter... + y = self.filtfilt(zpk, x, padlen=n) + # Result should be just xlow. + err = np.abs(y - xlow).max() + assert_(err < 1e-4) + + # A 2D case. + x2d = np.vstack([xlow, xlow + xhigh]) + y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) + assert_equal(y2d.shape, x2d.shape) + err = np.abs(y2d - xlow).max() + assert_(err < 1e-4) + + # Use the previous result to check the use of the axis keyword. + # (Regression test for ticket #1620) + y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) + assert_equal(y2d, y2dt.T) + + def test_axis(self): + # Test the 'axis' keyword on a 3D array. + x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12) + zpk = butter(3, 0.125, output='zpk') + y0 = self.filtfilt(zpk, x, padlen=0, axis=0) + y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1) + assert_array_equal(y0, np.swapaxes(y1, 0, 1)) + y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2) + assert_array_equal(y0, np.swapaxes(y2, 0, 2)) + + def test_acoeff(self): + if self.filtfilt_kind != 'tf': + return # only necessary for TF + # test for 'a' coefficient as single number + out = signal.filtfilt([.5, .5], 1, np.arange(10)) + assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14) + + def test_gust_simple(self): + if self.filtfilt_kind != 'tf': + pytest.skip('gust only implemented for TF systems') + # The input array has length 2. The exact solution for this case + # was computed "by hand". + x = np.array([1.0, 2.0]) + b = np.array([0.5]) + a = np.array([1.0, -0.5]) + y, z1, z2 = _filtfilt_gust(b, a, x) + assert_allclose([z1[0], z2[0]], + [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]]) + assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1], + 0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]]) + + def test_gust_scalars(self): + if self.filtfilt_kind != 'tf': + pytest.skip('gust only implemented for TF systems') + # The filter coefficients are both scalars, so the filter simply + # multiplies its input by b/a. When it is used in filtfilt, the + # factor is (b/a)**2. + x = np.arange(12) + b = 3.0 + a = 2.0 + y = filtfilt(b, a, x, method="gust") + expected = (b/a)**2 * x + assert_allclose(y, expected) + + +class TestSOSFiltFilt(TestFiltFilt): + filtfilt_kind = 'sos' + + def test_equivalence(self): + """Test equivalence between sosfiltfilt and filtfilt""" + x = np.random.RandomState(0).randn(1000) + for order in range(1, 6): + zpk = signal.butter(order, 0.35, output='zpk') + b, a = zpk2tf(*zpk) + sos = zpk2sos(*zpk) + y = filtfilt(b, a, x) + y_sos = sosfiltfilt(sos, x) + assert_allclose(y, y_sos, atol=1e-12, err_msg=f'order={order}') + + +def filtfilt_gust_opt(b, a, x): + """ + An alternative implementation of filtfilt with Gustafsson edges. + + This function computes the same result as + `scipy.signal._signaltools._filtfilt_gust`, but only 1-d arrays + are accepted. The problem is solved using `fmin` from `scipy.optimize`. + `_filtfilt_gust` is significantly faster than this implementation. 
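+
+    Sketch of the idea (editorial note, not from the original docstring):
+    Gustafsson's method chooses the forward/backward initial conditions
+    ``z0f`` and ``z0b`` that minimise
+    ``sum((backward(forward(x)) - forward(backward(x)))**2)``, which is exactly
+    the objective evaluated by ``filtfilt_gust_opt_func`` below.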
+ """ + def filtfilt_gust_opt_func(ics, b, a, x): + """Objective function used in filtfilt_gust_opt.""" + m = max(len(a), len(b)) - 1 + z0f = ics[:m] + z0b = ics[m:] + y_f = lfilter(b, a, x, zi=z0f)[0] + y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1] + + y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] + y_bf = lfilter(b, a, y_b, zi=z0f)[0] + value = np.sum((y_fb - y_bf)**2) + return value + + m = max(len(a), len(b)) - 1 + zi = lfilter_zi(b, a) + ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi)) + result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x), + xtol=1e-10, ftol=1e-12, + maxfun=10000, maxiter=10000, + full_output=True, disp=False) + opt, fopt, niter, funcalls, warnflag = result + if warnflag > 0: + raise RuntimeError("minimization failed in filtfilt_gust_opt: " + "warnflag=%d" % warnflag) + z0f = opt[:m] + z0b = opt[m:] + + # Apply the forward-backward filter using the computed initial + # conditions. + y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] + y = lfilter(b, a, y_b, zi=z0f)[0] + + return y, z0f, z0b + + +def check_filtfilt_gust(b, a, shape, axis, irlen=None): + # Generate x, the data to be filtered. + np.random.seed(123) + x = np.random.randn(*shape) + + # Apply filtfilt to x. This is the main calculation to be checked. + y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen) + + # Also call the private function so we can test the ICs. + yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) + + # filtfilt_gust_opt is an independent implementation that gives the + # expected result, but it only handles 1-D arrays, so use some looping + # and reshaping shenanigans to create the expected output arrays. + xx = np.swapaxes(x, axis, -1) + out_shape = xx.shape[:-1] + yo = np.empty_like(xx) + m = max(len(a), len(b)) - 1 + zo1 = np.empty(out_shape + (m,)) + zo2 = np.empty(out_shape + (m,)) + for indx in product(*[range(d) for d in out_shape]): + yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx]) + yo = np.swapaxes(yo, -1, axis) + zo1 = np.swapaxes(zo1, -1, axis) + zo2 = np.swapaxes(zo2, -1, axis) + + assert_allclose(y, yo, rtol=1e-8, atol=1e-9) + assert_allclose(yg, yo, rtol=1e-8, atol=1e-9) + assert_allclose(zg1, zo1, rtol=1e-8, atol=1e-9) + assert_allclose(zg2, zo2, rtol=1e-8, atol=1e-9) + + +@pytest.mark.fail_slow(10) +def test_choose_conv_method(): + for mode in ['valid', 'same', 'full']: + for ndim in [1, 2]: + n, k, true_method = 8, 6, 'direct' + x = np.random.randn(*((n,) * ndim)) + h = np.random.randn(*((k,) * ndim)) + + method = choose_conv_method(x, h, mode=mode) + assert_equal(method, true_method) + + method_try, times = choose_conv_method(x, h, mode=mode, measure=True) + assert_(method_try in {'fft', 'direct'}) + assert_(isinstance(times, dict)) + assert_('fft' in times.keys() and 'direct' in times.keys()) + + x = np.array([2**51], dtype=np.int64) + h = x.copy() + assert_equal(choose_conv_method(x, h, mode=mode), 'direct') + + +@pytest.mark.thread_unsafe +def test_choose_conv_dtype_deprecation(): + # gh-21211 + a = np.asarray([1, 2, 3, 6, 5, 3], dtype=object) + b = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + choose_conv_method(a, b) + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +def test_choose_conv_method_2(): + for mode in ['valid', 'same', 'full']: + x = [Decimal(3), Decimal(2)] + h = [Decimal(1), Decimal(4)] + assert_equal(choose_conv_method(x, h, mode=mode), 'direct') + + n = 10 + for not_fft_conv_supp in ["complex256", 
"complex192"]: + if hasattr(np, not_fft_conv_supp): + x = np.ones(n, dtype=not_fft_conv_supp) + h = x.copy() + assert_equal(choose_conv_method(x, h, mode=mode), 'direct') + + +@pytest.mark.fail_slow(10) +def test_filtfilt_gust(): + # Design a filter. + z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk') + + # Find the approximate impulse response length of the filter. + eps = 1e-10 + r = np.max(np.abs(p)) + approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) + + np.random.seed(123) + + b, a = zpk2tf(z, p, k) + for irlen in [None, approx_impulse_len]: + signal_len = 5 * approx_impulse_len + + # 1-d test case + check_filtfilt_gust(b, a, (signal_len,), 0, irlen) + + # 3-d test case; test each axis. + for axis in range(3): + shape = [2, 2, 2] + shape[axis] = signal_len + check_filtfilt_gust(b, a, shape, axis, irlen) + + # Test case with length less than 2*approx_impulse_len. + # In this case, `filtfilt_gust` should behave the same as if + # `irlen=None` was given. + length = 2*approx_impulse_len - 50 + check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) + + +class TestDecimate: + def test_bad_args(self): + x = np.arange(12) + assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) + assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) + + def test_basic_IIR(self): + x = np.arange(12) + y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() + assert_array_equal(y, x[::2]) + + def test_basic_FIR(self): + x = np.arange(12) + y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() + assert_array_equal(y, x[::2]) + + def test_shape(self): + # Regression test for ticket #1480. + z = np.zeros((30, 30)) + d0 = signal.decimate(z, 2, axis=0, zero_phase=False) + assert_equal(d0.shape, (15, 30)) + d1 = signal.decimate(z, 2, axis=1, zero_phase=False) + assert_equal(d1.shape, (30, 15)) + + def test_phaseshift_FIR(self): + with suppress_warnings() as sup: + sup.filter(BadCoefficients, "Badly conditioned filter") + self._test_phaseshift(method='fir', zero_phase=False) + + def test_zero_phase_FIR(self): + with suppress_warnings() as sup: + sup.filter(BadCoefficients, "Badly conditioned filter") + self._test_phaseshift(method='fir', zero_phase=True) + + def test_phaseshift_IIR(self): + self._test_phaseshift(method='iir', zero_phase=False) + + def test_zero_phase_IIR(self): + self._test_phaseshift(method='iir', zero_phase=True) + + def _test_phaseshift(self, method, zero_phase): + rate = 120 + rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 + + t_tot = 100 # Need to let antialiasing filters settle + t = np.arange(rate*t_tot+1) / float(rate) + + # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts + freqs = np.array(rates_to) * 0.8 / 2 + d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) + * signal.windows.tukey(t.size, 0.1)) + + for rate_to in rates_to: + q = rate // rate_to + t_to = np.arange(rate_to*t_tot+1) / float(rate_to) + d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) + * signal.windows.tukey(t_to.size, 0.1)) + + # Set up downsampling filters, match v0.17 defaults + if method == 'fir': + n = 30 + system = signal.dlti(signal.firwin(n + 1, 1. / q, + window='hamming'), 1.) 
+ elif method == 'iir': + n = 8 + wc = 0.8*np.pi/q + system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) + + # Calculate expected phase response, as unit complex vector + if zero_phase is False: + _, h_resps = signal.freqz(system.num, system.den, + freqs/rate*2*np.pi) + h_resps /= np.abs(h_resps) + else: + h_resps = np.ones_like(freqs) + + y_resamps = signal.decimate(d.real, q, n, ftype=system, + zero_phase=zero_phase) + + # Get phase from complex inner product, like CSD + h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) + h_resamps /= np.abs(h_resamps) + subnyq = freqs < 0.5*rate_to + + # Complex vectors should be aligned, only compare below nyquist + assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, + atol=1e-3, rtol=1e-3) + + def test_auto_n(self): + # Test that our value of n is a reasonable choice (depends on + # the downsampling factor) + sfreq = 100. + n = 1000 + t = np.arange(n) / sfreq + # will alias for decimations (>= 15) + x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) * t) + assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) + x_out = signal.decimate(x, 30, ftype='fir') + assert_array_less(np.linalg.norm(x_out), 0.01) + + def test_long_float32(self): + # regression: gh-15072. With 32-bit float and either lfilter + # or filtfilt, this is numerically unstable + x = signal.decimate(np.ones(10_000, dtype=np.float32), 10) + assert not any(np.isnan(x)) + + def test_float16_upcast(self): + # float16 must be upcast to float64 + x = signal.decimate(np.ones(100, dtype=np.float16), 10) + assert x.dtype.type == np.float64 + + def test_complex_iir_dlti(self): + # regression: gh-17845 + # centre frequency for filter [Hz] + fcentre = 50 + # filter passband width [Hz] + fwidth = 5 + # sample rate [Hz] + fs = 1e3 + + z, p, k = signal.butter(2, 2*np.pi*fwidth/2, output='zpk', fs=fs) + z = z.astype(complex) * np.exp(2j * np.pi * fcentre/fs) + p = p.astype(complex) * np.exp(2j * np.pi * fcentre/fs) + system = signal.dlti(z, p, k) + + t = np.arange(200) / fs + + # input + u = (np.exp(2j * np.pi * fcentre * t) + + 0.5 * np.exp(-2j * np.pi * fcentre * t)) + + ynzp = signal.decimate(u, 2, ftype=system, zero_phase=False) + ynzpref = signal.lfilter(*signal.zpk2tf(z, p, k), + u)[::2] + + assert_equal(ynzp, ynzpref) + + yzp = signal.decimate(u, 2, ftype=system, zero_phase=True) + yzpref = signal.filtfilt(*signal.zpk2tf(z, p, k), + u)[::2] + + assert_allclose(yzp, yzpref, rtol=1e-10, atol=1e-13) + + def test_complex_fir_dlti(self): + # centre frequency for filter [Hz] + fcentre = 50 + # filter passband width [Hz] + fwidth = 5 + # sample rate [Hz] + fs = 1e3 + numtaps = 20 + + # FIR filter about 0Hz + bbase = signal.firwin(numtaps, fwidth/2, fs=fs) + + # rotate these to desired frequency + zbase = np.roots(bbase) + zrot = zbase * np.exp(2j * np.pi * fcentre/fs) + # FIR filter about 50Hz, maintaining passband gain of 0dB + bz = bbase[0] * np.poly(zrot) + + system = signal.dlti(bz, 1) + + t = np.arange(200) / fs + + # input + u = (np.exp(2j * np.pi * fcentre * t) + + 0.5 * np.exp(-2j * np.pi * fcentre * t)) + + ynzp = signal.decimate(u, 2, ftype=system, zero_phase=False) + ynzpref = signal.upfirdn(bz, u, up=1, down=2)[:100] + + assert_equal(ynzp, ynzpref) + + yzp = signal.decimate(u, 2, ftype=system, zero_phase=True) + yzpref = signal.resample_poly(u, 1, 2, window=bz) + + assert_equal(yzp, yzpref) + + +class TestHilbert: + + def test_bad_args(self): + x = np.array([1.0 + 0.0j]) + assert_raises(ValueError, hilbert, x) + x = np.arange(8.0) + assert_raises(ValueError, hilbert, x, 
N=0) + + def test_hilbert_theoretical(self): + # test cases by Ariel Rokem + decimal = 14 + + pi = np.pi + t = np.arange(0, 2 * pi, pi / 256) + a0 = np.sin(t) + a1 = np.cos(t) + a2 = np.sin(2 * t) + a3 = np.cos(2 * t) + a = np.vstack([a0, a1, a2, a3]) + + h = hilbert(a) + h_abs = np.abs(h) + h_angle = np.angle(h) + h_real = np.real(h) + + # The real part should be equal to the original signals: + assert_almost_equal(h_real, a, decimal) + # The absolute value should be one everywhere, for this input: + assert_almost_equal(h_abs, np.ones(a.shape), decimal) + # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in + # the first 256 bins: + assert_almost_equal(h_angle[0, :256], + np.arange(-pi / 2, pi / 2, pi / 256), + decimal) + # For the 'slow' cosine - the phase should go from 0 to pi in the + # same interval: + assert_almost_equal( + h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) + # The 'fast' sine should make this phase transition in half the time: + assert_almost_equal(h_angle[2, :128], + np.arange(-pi / 2, pi / 2, pi / 128), + decimal) + # Ditto for the 'fast' cosine: + assert_almost_equal( + h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) + + # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia + assert_almost_equal(h[1].imag, a0, decimal) + + def test_hilbert_axisN(self): + # tests for axis and N arguments + a = np.arange(18).reshape(3, 6) + # test axis + aa = hilbert(a, axis=-1) + assert_equal(hilbert(a.T, axis=0), aa.T) + # test 1d + assert_almost_equal(hilbert(a[0]), aa[0], 14) + + # test N + aan = hilbert(a, N=20, axis=-1) + assert_equal(aan.shape, [3, 20]) + assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3]) + # the next test is just a regression test, + # no idea whether numbers make sense + a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, + 1.000000000000000e+00 - 2.047794505137069j, + 1.999999999999999e+00 - 2.244055555687583j, + 3.000000000000000e+00 - 1.262750302935009j, + 4.000000000000000e+00 - 1.066489252384493j, + 5.000000000000000e+00 + 2.918022706971047j, + 8.881784197001253e-17 + 3.845658908989067j, + -9.444121133484362e-17 + 0.985044202202061j, + -1.776356839400251e-16 + 1.332257797702019j, + -3.996802888650564e-16 + 0.501905089898885j, + 1.332267629550188e-16 + 0.668696078880782j, + -1.192678053963799e-16 + 0.235487067862679j, + -1.776356839400251e-16 + 0.286439612812121j, + 3.108624468950438e-16 + 0.031676888064907j, + 1.332267629550188e-16 - 0.019275656884536j, + -2.360035624836702e-16 - 0.1652588660287j, + 0.000000000000000e+00 - 0.332049855010597j, + 3.552713678800501e-16 - 0.403810179797771j, + 8.881784197001253e-17 - 0.751023775297729j, + 9.444121133484362e-17 - 0.79252210110103j]) + assert_almost_equal(aan[0], a0hilb, 14, 'N regression') + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_hilbert_types(self, dtype): + in_typed = np.zeros(8, dtype=dtype) + assert_equal(np.real(signal.hilbert(in_typed)).dtype, dtype) + + +class TestHilbert2: + + def test_bad_args(self): + # x must be real. + x = np.array([[1.0 + 0.0j]]) + assert_raises(ValueError, hilbert2, x) + + # x must be rank 2. + x = np.arange(24).reshape(2, 3, 4) + assert_raises(ValueError, hilbert2, x) + + # Bad value for N. 
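+        # Sketch (editorial addition, not part of the original tests): `N` sets
+        # the FFT shape and must be a positive int or a 2-tuple of positive
+        # ints, e.g. hilbert2(x, N=(8, 8)) is accepted, while the N=0, N=(2, 0)
+        # and N=(2,) cases below are rejected.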
+ x = np.arange(16).reshape(4, 4) + assert_raises(ValueError, hilbert2, x, N=0) + assert_raises(ValueError, hilbert2, x, N=(2, 0)) + assert_raises(ValueError, hilbert2, x, N=(2,)) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_hilbert2_types(self, dtype): + in_typed = np.zeros((2, 32), dtype=dtype) + assert_equal(np.real(signal.hilbert2(in_typed)).dtype, dtype) + + +class TestEnvelope: + """Unit tests for function `._signaltools.envelope()`. """ + + @staticmethod + def assert_close(actual, desired, msg): + """Little helper to compare to arrays with proper tolerances""" + xp_assert_close(actual, desired, atol=1e-12, rtol=1e-12, err_msg=msg) + + def test_envelope_invalid_parameters(self): + """For `envelope()` Raise all exceptions that are used to verify function + parameters. """ + with pytest.raises(ValueError, + match=r"Invalid parameter axis=2 for z.shape=.*"): + envelope(np.ones(3), axis=2) + with pytest.raises(ValueError, + match=r"z.shape\[axis\] not > 0 for z.shape=.*"): + envelope(np.ones((3, 0)), axis=1) + for bp_in in [(0, 1, 2), (0, 2.), (None, 2.)]: + ts = ', '.join(map(str, bp_in)) + with pytest.raises(ValueError, + match=rf"bp_in=\({ts}\) isn't a 2-tuple of.*"): + # noinspection PyTypeChecker + envelope(np.ones(4), bp_in=bp_in) + with pytest.raises(ValueError, + match="n_out=10.0 is not a positive integer or.*"): + # noinspection PyTypeChecker + envelope(np.ones(4), n_out=10.) + for bp_in in [(-1, 3), (1, 1), (0, 10)]: + with pytest.raises(ValueError, + match=r"`-n//2 <= bp_in\[0\] < bp_in\[1\] <=.*"): + envelope(np.ones(4), bp_in=bp_in) + with pytest.raises(ValueError, match="residual='undefined' not in .*"): + # noinspection PyTypeChecker + envelope(np.ones(4), residual='undefined') + + def test_envelope_verify_parameters(self): + """Ensure that the various parametrizations produce compatible results. 
""" + Z, Zr_a = [4, 2, 2, 3, 0], [4, 0, 0, 6, 0, 0, 0, 0] + z = sp_fft.irfft(Z) + n = len(z) + + # the reference envelope: + ze2_0, zr_0 = envelope(z, (1, 3), residual='all', squared=True) + self.assert_close(sp_fft.rfft(ze2_0), np.array([4, 2, 0, 0, 0]).astype(complex), + msg="Envelope calculation error") + self.assert_close(sp_fft.rfft(zr_0), np.array([4, 0, 0, 3, 0]).astype(complex), + msg="Residual calculation error") + + ze_1, zr_1 = envelope(z, (1, 3), residual='all', squared=False) + self.assert_close(ze_1**2, ze2_0, + msg="Unsquared versus Squared envelope calculation error") + self.assert_close(zr_1, zr_0, + msg="Unsquared versus Squared residual calculation error") + + ze2_2, zr_2 = envelope(z, (1, 3), residual='all', squared=True, n_out=3*n) + self.assert_close(ze2_2[::3], ze2_0, + msg="3x up-sampled envelope calculation error") + self.assert_close(zr_2[::3], zr_0, + msg="3x up-sampled residual calculation error") + + ze2_3, zr_3 = envelope(z, (1, 3), residual='lowpass', squared=True) + self.assert_close(ze2_3, ze2_0, + msg="`residual='lowpass'` envelope calculation error") + self.assert_close(sp_fft.rfft(zr_3), np.array([4, 0, 0, 0, 0]).astype(complex), + msg="`residual='lowpass'` residual calculation error") + + ze2_4 = envelope(z, (1, 3), residual=None, squared=True) + self.assert_close(ze2_4, ze2_0, + msg="`residual=None` envelope calculation error") + + # compare complex analytic signal to real version + Z_a = np.copy(Z) + Z_a[1:] *= 2 + z_a = sp_fft.ifft(Z_a, n=n) # analytic signal of Z + self.assert_close(z_a.real, z, + msg="Reference analytic signal error") + ze2_a, zr_a = envelope(z_a, (1, 3), residual='all', squared=True) + self.assert_close(ze2_a, ze2_0.astype(complex), # dtypes must match + msg="Complex envelope calculation error") + self.assert_close(sp_fft.fft(zr_a), np.array(Zr_a).astype(complex), + msg="Complex residual calculation error") + + @pytest.mark.parametrize( + " Z, bp_in, Ze2_desired, Zr_desired", + [([1, 0, 2, 2, 0], (1, None), [4, 2, 0, 0, 0], [1, 0, 0, 0, 0]), + ([4, 0, 2, 0, 0], (0, None), [4, 0, 2, 0, 0], [0, 0, 0, 0, 0]), + ([4, 0, 0, 2, 0], (None, None), [4, 0, 0, 2, 0], [0, 0, 0, 0, 0]), + ([0, 0, 2, 2, 0], (1, 3), [2, 0, 0, 0, 0], [0, 0, 0, 2, 0]), + ([4, 0, 2, 2, 0], (-3, 3), [4, 0, 2, 0, 0], [0, 0, 0, 2, 0]), + ([4, 0, 3, 4, 0], (None, 1), [2, 0, 0, 0, 0], [0, 0, 3, 4, 0]), + ([4, 0, 3, 4, 0], (None, 0), [0, 0, 0, 0, 0], [4, 0, 3, 4, 0])]) + def test_envelope_real_signals(self, Z, bp_in, Ze2_desired, Zr_desired): + """Test envelope calculation with real-valued test signals. + + The comparisons are performed in the Fourier space, since it makes evaluating + the bandpass filter behavior straightforward. Note that also the squared + envelope can be easily calculated by hand, if one recalls that coefficients of + a complex-valued Fourier series representing the signal can be directly + determined by an FFT and that the absolute square of a Fourier series is again + a Fourier series. 
+ """ + z = sp_fft.irfft(Z) + ze2, zr = envelope(z, bp_in, residual='all', squared=True) + ze2_lp, zr_lp = envelope(z, bp_in, residual='lowpass', squared=True) + Ze2, Zr, Ze2_lp, Zr_lp = (sp_fft.rfft(z_) for z_ in (ze2, zr, ze2_lp, zr_lp)) + + Ze2_desired = np.array(Ze2_desired).astype(complex) + Zr_desired = np.array(Zr_desired).astype(complex) + self.assert_close(Ze2, Ze2_desired, + msg="Envelope calculation error (residual='all')") + self.assert_close(Zr, Zr_desired, + msg="Residual calculation error (residual='all')") + + if bp_in[1] is not None: + Zr_desired[bp_in[1]:] = 0 + self.assert_close(Ze2_lp, Ze2_desired, + msg="Envelope calculation error (residual='lowpass')") + self.assert_close(Zr_lp, Zr_desired, + msg="Residual calculation error (residual='lowpass')") + + @pytest.mark.parametrize( + " Z, bp_in, Ze2_desired, Zr_desired", + [([0, 5, 0, 5, 0], (None, None), [5, 0, 10, 0, 5], [0, 0, 0, 0, 0]), + ([1, 5, 0, 5, 2], (-1, 2), [5, 0, 10, 0, 5], [1, 0, 0, 0, 2]), + ([1, 2, 6, 0, 6, 3], (-1, 2), [0, 6, 0, 12, 0, 6], [1, 2, 0, 0, 0, 3]) + ]) + def test_envelope_complex_signals(self, Z, bp_in, Ze2_desired, Zr_desired): + """Test envelope calculation with complex-valued test signals. + + We only need to test for the complex envelope here, since the ``Nones``s in the + bandpass filter were already tested in the previous test. + """ + z = sp_fft.ifft(sp_fft.ifftshift(Z)) + ze2, zr = envelope(z, bp_in, residual='all', squared=True) + Ze2, Zr = (sp_fft.fftshift(sp_fft.fft(z_)) for z_ in (ze2, zr)) + + self.assert_close(Ze2, np.array(Ze2_desired).astype(complex), + msg="Envelope calculation error") + self.assert_close(Zr, np.array(Zr_desired).astype(complex), + msg="Residual calculation error") + + def test_envelope_verify_axis_parameter(self): + """Test for multi-channel envelope calculations. """ + z = sp_fft.irfft([[1, 0, 2, 2, 0], [7, 0, 4, 4, 0]]) + Ze2_desired = np.array([[4, 2, 0, 0, 0], [16, 8, 0, 0, 0]], + dtype=complex) + Zr_desired = np.array([[1, 0, 0, 0, 0], [7, 0, 0, 0, 0]], dtype=complex) + + ze2, zr = envelope(z, squared=True, axis=1) + ye2T, yrT = envelope(z.T, squared=True, axis=0) + Ze2, Ye2, Zr, Yr = (sp_fft.rfft(z_) for z_ in (ze2, ye2T.T, zr, yrT.T)) + + self.assert_close(Ze2, Ze2_desired, msg="2d envelope calculation error") + self.assert_close(Zr, Zr_desired, msg="2d residual calculation error") + self.assert_close(Ye2, Ze2_desired, msg="Transposed 2d envelope calc. error") + self.assert_close(Yr, Zr_desired, msg="Transposed 2d residual calc. error") + + def test_envelope_verify_axis_parameter_complex(self): + """Test for multi-channel envelope calculations with complex values. """ + z = sp_fft.ifft(sp_fft.ifftshift([[1, 5, 0, 5, 2], [1, 10, 0, 10, 2]], axes=1)) + Ze2_des = np.array([[5, 0, 10, 0, 5], [20, 0, 40, 0, 20],], + dtype=complex) + Zr_des = np.array([[1, 0, 0, 0, 2], [1, 0, 0, 0, 2]], dtype=complex) + + kw = dict(bp_in=(-1, 2), residual='all', squared=True) + ze2, zr = envelope(z, axis=1, **kw) + ye2T, yrT = envelope(z.T, axis=0, **kw) + Ze2, Ye2, Zr, Yr = (sp_fft.fftshift(sp_fft.fft(z_), axes=1) + for z_ in (ze2, ye2T.T, zr, yrT.T)) + + self.assert_close(Ze2, Ze2_des, msg="2d envelope calculation error") + self.assert_close(Zr, Zr_des, msg="2d residual calculation error") + self.assert_close(Ye2, Ze2_des, msg="Transposed 2d envelope calc. error") + self.assert_close(Yr, Zr_des, msg="Transposed 2d residual calc. 
error") + + @pytest.mark.parametrize('X', [[4, 0, 0, 1, 2], [4, 0, 0, 2, 1, 2]]) + def test_compare_envelope_hilbert(self, X): + """Compare output of `envelope()` and `hilbert()`. """ + x = sp_fft.irfft(X) + e_hil = np.abs(hilbert(x)) + e_env = envelope(x, (None, None), residual=None) + self.assert_close(e_hil, e_env, msg="Hilbert-Envelope comparison error") + + +class TestPartialFractionExpansion: + @staticmethod + def assert_rp_almost_equal(r, p, r_true, p_true, decimal=7): + r_true = np.asarray(r_true) + p_true = np.asarray(p_true) + + distance = np.hypot(abs(p[:, None] - p_true), + abs(r[:, None] - r_true)) + + rows, cols = linear_sum_assignment(distance) + assert_almost_equal(p[rows], p_true[cols], decimal=decimal) + assert_almost_equal(r[rows], r_true[cols], decimal=decimal) + + def test_compute_factors(self): + factors, poly = _compute_factors([1, 2, 3], [3, 2, 1]) + assert_equal(len(factors), 3) + assert_almost_equal(factors[0], np.poly([2, 2, 3])) + assert_almost_equal(factors[1], np.poly([1, 1, 1, 3])) + assert_almost_equal(factors[2], np.poly([1, 1, 1, 2, 2])) + assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3])) + + factors, poly = _compute_factors([1, 2, 3], [3, 2, 1], + include_powers=True) + assert_equal(len(factors), 6) + assert_almost_equal(factors[0], np.poly([1, 1, 2, 2, 3])) + assert_almost_equal(factors[1], np.poly([1, 2, 2, 3])) + assert_almost_equal(factors[2], np.poly([2, 2, 3])) + assert_almost_equal(factors[3], np.poly([1, 1, 1, 2, 3])) + assert_almost_equal(factors[4], np.poly([1, 1, 1, 3])) + assert_almost_equal(factors[5], np.poly([1, 1, 1, 2, 2])) + assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3])) + + def test_group_poles(self): + unique, multiplicity = _group_poles( + [1.0, 1.001, 1.003, 2.0, 2.003, 3.0], 0.1, 'min') + assert_equal(unique, [1.0, 2.0, 3.0]) + assert_equal(multiplicity, [3, 2, 1]) + + def test_residue_general(self): + # Test are taken from issue #4464, note that poles in scipy are + # in increasing by absolute value order, opposite to MATLAB. 
+ r, p, k = residue([5, 3, -2, 7], [-4, 0, 8, 3]) + assert_almost_equal(r, [1.3320, -0.6653, -1.4167], decimal=4) + assert_almost_equal(p, [-0.4093, -1.1644, 1.5737], decimal=4) + assert_almost_equal(k, [-1.2500], decimal=4) + + r, p, k = residue([-4, 8], [1, 6, 8]) + assert_almost_equal(r, [8, -12]) + assert_almost_equal(p, [-2, -4]) + assert_equal(k.size, 0) + + r, p, k = residue([4, 1], [1, -1, -2]) + assert_almost_equal(r, [1, 3]) + assert_almost_equal(p, [-1, 2]) + assert_equal(k.size, 0) + + r, p, k = residue([4, 3], [2, -3.4, 1.98, -0.406]) + self.assert_rp_almost_equal( + r, p, [-18.125 - 13.125j, -18.125 + 13.125j, 36.25], + [0.5 - 0.2j, 0.5 + 0.2j, 0.7]) + assert_equal(k.size, 0) + + r, p, k = residue([2, 1], [1, 5, 8, 4]) + self.assert_rp_almost_equal(r, p, [-1, 1, 3], [-1, -2, -2]) + assert_equal(k.size, 0) + + r, p, k = residue([3, -1.1, 0.88, -2.396, 1.348], + [1, -0.7, -0.14, 0.048]) + assert_almost_equal(r, [-3, 4, 1]) + assert_almost_equal(p, [0.2, -0.3, 0.8]) + assert_almost_equal(k, [3, 1]) + + r, p, k = residue([1], [1, 2, -3]) + assert_almost_equal(r, [0.25, -0.25]) + assert_almost_equal(p, [1, -3]) + assert_equal(k.size, 0) + + r, p, k = residue([1, 0, -5], [1, 0, 0, 0, -1]) + self.assert_rp_almost_equal(r, p, + [1, 1.5j, -1.5j, -1], [-1, -1j, 1j, 1]) + assert_equal(k.size, 0) + + r, p, k = residue([3, 8, 6], [1, 3, 3, 1]) + self.assert_rp_almost_equal(r, p, [1, 2, 3], [-1, -1, -1]) + assert_equal(k.size, 0) + + r, p, k = residue([3, -1], [1, -3, 2]) + assert_almost_equal(r, [-2, 5]) + assert_almost_equal(p, [1, 2]) + assert_equal(k.size, 0) + + r, p, k = residue([2, 3, -1], [1, -3, 2]) + assert_almost_equal(r, [-4, 13]) + assert_almost_equal(p, [1, 2]) + assert_almost_equal(k, [2]) + + r, p, k = residue([7, 2, 3, -1], [1, -3, 2]) + assert_almost_equal(r, [-11, 69]) + assert_almost_equal(p, [1, 2]) + assert_almost_equal(k, [7, 23]) + + r, p, k = residue([2, 3, -1], [1, -3, 4, -2]) + self.assert_rp_almost_equal(r, p, [4, -1 + 3.5j, -1 - 3.5j], + [1, 1 - 1j, 1 + 1j]) + assert_almost_equal(k.size, 0) + + def test_residue_leading_zeros(self): + # Leading zeros in numerator or denominator must not affect the answer. + r0, p0, k0 = residue([5, 3, -2, 7], [-4, 0, 8, 3]) + r1, p1, k1 = residue([0, 5, 3, -2, 7], [-4, 0, 8, 3]) + r2, p2, k2 = residue([5, 3, -2, 7], [0, -4, 0, 8, 3]) + r3, p3, k3 = residue([0, 0, 5, 3, -2, 7], [0, 0, 0, -4, 0, 8, 3]) + assert_almost_equal(r0, r1) + assert_almost_equal(r0, r2) + assert_almost_equal(r0, r3) + assert_almost_equal(p0, p1) + assert_almost_equal(p0, p2) + assert_almost_equal(p0, p3) + assert_almost_equal(k0, k1) + assert_almost_equal(k0, k2) + assert_almost_equal(k0, k3) + + def test_resiude_degenerate(self): + # Several tests for zero numerator and denominator. 
+ r, p, k = residue([0, 0], [1, 6, 8]) + assert_almost_equal(r, [0, 0]) + assert_almost_equal(p, [-2, -4]) + assert_equal(k.size, 0) + + r, p, k = residue(0, 1) + assert_equal(r.size, 0) + assert_equal(p.size, 0) + assert_equal(k.size, 0) + + with pytest.raises(ValueError, match="Denominator `a` is zero."): + residue(1, 0) + + def test_residuez_general(self): + r, p, k = residuez([1, 6, 6, 2], [1, -(2 + 1j), (1 + 2j), -1j]) + self.assert_rp_almost_equal(r, p, [-2+2.5j, 7.5+7.5j, -4.5-12j], + [1j, 1, 1]) + assert_almost_equal(k, [2j]) + + r, p, k = residuez([1, 2, 1], [1, -1, 0.3561]) + self.assert_rp_almost_equal(r, p, + [-0.9041 - 5.9928j, -0.9041 + 5.9928j], + [0.5 + 0.3257j, 0.5 - 0.3257j], + decimal=4) + assert_almost_equal(k, [2.8082], decimal=4) + + r, p, k = residuez([1, -1], [1, -5, 6]) + assert_almost_equal(r, [-1, 2]) + assert_almost_equal(p, [2, 3]) + assert_equal(k.size, 0) + + r, p, k = residuez([2, 3, 4], [1, 3, 3, 1]) + self.assert_rp_almost_equal(r, p, [4, -5, 3], [-1, -1, -1]) + assert_equal(k.size, 0) + + r, p, k = residuez([1, -10, -4, 4], [2, -2, -4]) + assert_almost_equal(r, [0.5, -1.5]) + assert_almost_equal(p, [-1, 2]) + assert_almost_equal(k, [1.5, -1]) + + r, p, k = residuez([18], [18, 3, -4, -1]) + self.assert_rp_almost_equal(r, p, + [0.36, 0.24, 0.4], [0.5, -1/3, -1/3]) + assert_equal(k.size, 0) + + r, p, k = residuez([2, 3], np.polymul([1, -1/2], [1, 1/4])) + assert_almost_equal(r, [-10/3, 16/3]) + assert_almost_equal(p, [-0.25, 0.5]) + assert_equal(k.size, 0) + + r, p, k = residuez([1, -2, 1], [1, -1]) + assert_almost_equal(r, [0]) + assert_almost_equal(p, [1]) + assert_almost_equal(k, [1, -1]) + + r, p, k = residuez(1, [1, -1j]) + assert_almost_equal(r, [1]) + assert_almost_equal(p, [1j]) + assert_equal(k.size, 0) + + r, p, k = residuez(1, [1, -1, 0.25]) + assert_almost_equal(r, [0, 1]) + assert_almost_equal(p, [0.5, 0.5]) + assert_equal(k.size, 0) + + r, p, k = residuez(1, [1, -0.75, .125]) + assert_almost_equal(r, [-1, 2]) + assert_almost_equal(p, [0.25, 0.5]) + assert_equal(k.size, 0) + + r, p, k = residuez([1, 6, 2], [1, -2, 1]) + assert_almost_equal(r, [-10, 9]) + assert_almost_equal(p, [1, 1]) + assert_almost_equal(k, [2]) + + r, p, k = residuez([6, 2], [1, -2, 1]) + assert_almost_equal(r, [-2, 8]) + assert_almost_equal(p, [1, 1]) + assert_equal(k.size, 0) + + r, p, k = residuez([1, 6, 6, 2], [1, -2, 1]) + assert_almost_equal(r, [-24, 15]) + assert_almost_equal(p, [1, 1]) + assert_almost_equal(k, [10, 2]) + + r, p, k = residuez([1, 0, 1], [1, 0, 0, 0, 0, -1]) + self.assert_rp_almost_equal(r, p, + [0.2618 + 0.1902j, 0.2618 - 0.1902j, + 0.4, 0.0382 - 0.1176j, 0.0382 + 0.1176j], + [-0.8090 + 0.5878j, -0.8090 - 0.5878j, + 1.0, 0.3090 + 0.9511j, 0.3090 - 0.9511j], + decimal=4) + assert_equal(k.size, 0) + + def test_residuez_trailing_zeros(self): + # Trailing zeros in numerator or denominator must not affect the + # answer. 
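+        # Why this holds (editorial sketch, not from the original comment):
+        # residuez works with polynomials in z**-1, b(z) = b[0] + b[1]*z**-1 + ...,
+        # so appending trailing zeros only adds vanishing higher-order terms and
+        # leaves the rational function b(z)/a(z) unchanged.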
+ r0, p0, k0 = residuez([5, 3, -2, 7], [-4, 0, 8, 3]) + r1, p1, k1 = residuez([5, 3, -2, 7, 0], [-4, 0, 8, 3]) + r2, p2, k2 = residuez([5, 3, -2, 7], [-4, 0, 8, 3, 0]) + r3, p3, k3 = residuez([5, 3, -2, 7, 0, 0], [-4, 0, 8, 3, 0, 0, 0]) + assert_almost_equal(r0, r1) + assert_almost_equal(r0, r2) + assert_almost_equal(r0, r3) + assert_almost_equal(p0, p1) + assert_almost_equal(p0, p2) + assert_almost_equal(p0, p3) + assert_almost_equal(k0, k1) + assert_almost_equal(k0, k2) + assert_almost_equal(k0, k3) + + def test_residuez_degenerate(self): + r, p, k = residuez([0, 0], [1, 6, 8]) + assert_almost_equal(r, [0, 0]) + assert_almost_equal(p, [-2, -4]) + assert_equal(k.size, 0) + + r, p, k = residuez(0, 1) + assert_equal(r.size, 0) + assert_equal(p.size, 0) + assert_equal(k.size, 0) + + with pytest.raises(ValueError, match="Denominator `a` is zero."): + residuez(1, 0) + + with pytest.raises(ValueError, + match="First coefficient of determinant `a` must " + "be non-zero."): + residuez(1, [0, 1, 2, 3]) + + def test_inverse_unique_roots_different_rtypes(self): + # This test was inspired by GitHub issue 2496. + r = [3 / 10, -1 / 6, -2 / 15] + p = [0, -2, -5] + k = [] + b_expected = [0, 1, 3] + a_expected = [1, 7, 10, 0] + + # With the default tolerance, the rtype does not matter + # for this example. + for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'): + b, a = invres(r, p, k, rtype=rtype) + assert_allclose(b, b_expected) + assert_allclose(a, a_expected) + + b, a = invresz(r, p, k, rtype=rtype) + assert_allclose(b, b_expected) + assert_allclose(a, a_expected) + + def test_inverse_repeated_roots_different_rtypes(self): + r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] + p = [0, -2, -2, -5] + k = [] + b_expected = [0, 0, 1, 3] + b_expected_z = [-1/6, -2/3, 11/6, 3] + a_expected = [1, 9, 24, 20, 0] + + for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'): + b, a = invres(r, p, k, rtype=rtype) + assert_allclose(b, b_expected, atol=1e-14) + assert_allclose(a, a_expected) + + b, a = invresz(r, p, k, rtype=rtype) + assert_allclose(b, b_expected_z, atol=1e-14) + assert_allclose(a, a_expected) + + def test_inverse_bad_rtype(self): + r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] + p = [0, -2, -2, -5] + k = [] + with pytest.raises(ValueError, match="`rtype` must be one of"): + invres(r, p, k, rtype='median') + with pytest.raises(ValueError, match="`rtype` must be one of"): + invresz(r, p, k, rtype='median') + + def test_invresz_one_coefficient_bug(self): + # Regression test for issue in gh-4646. 
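+        # Sketch (editorial addition, not part of the original tests): invresz
+        # inverts residuez, so the single term r/(1 - p*z**-1) with r=1, p=2 and
+        # k=[0] maps back to b = [1.0], a = [1.0, -2.0], as asserted below.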
+ r = [1] + p = [2] + k = [0] + b, a = invresz(r, p, k) + assert_allclose(b, [1.0]) + assert_allclose(a, [1.0, -2.0]) + + def test_invres(self): + b, a = invres([1], [1], []) + assert_almost_equal(b, [1]) + assert_almost_equal(a, [1, -1]) + + b, a = invres([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], []) + assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]) + assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]) + + b, a = invres([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3]) + assert_almost_equal(b, [1, -1 - 1j, 1 - 2j, 0.5 - 3j, 10]) + assert_almost_equal(a, [1, -3 - 1j, 4]) + + b, a = invres([-1, 2, 1j, 3 - 1j, 4, -2], + [-1, 2 - 1j, 2 - 1j, 3, 3, 3], []) + assert_almost_equal(b, [4 - 1j, -28 + 16j, 40 - 62j, 100 + 24j, + -292 + 219j, 192 - 268j]) + assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j, + 108 - 54j, -81 + 108j]) + + b, a = invres([-1, 1j], [1, 1], [1, 2]) + assert_almost_equal(b, [1, 0, -4, 3 + 1j]) + assert_almost_equal(a, [1, -2, 1]) + + def test_invresz(self): + b, a = invresz([1], [1], []) + assert_almost_equal(b, [1]) + assert_almost_equal(a, [1, -1]) + + b, a = invresz([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], []) + assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]) + assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]) + + b, a = invresz([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3]) + assert_almost_equal(b, [2.5, -3 - 1j, 1 - 2j, -1 - 3j, 12]) + assert_almost_equal(a, [1, -3 - 1j, 4]) + + b, a = invresz([-1, 2, 1j, 3 - 1j, 4, -2], + [-1, 2 - 1j, 2 - 1j, 3, 3, 3], []) + assert_almost_equal(b, [6, -50 + 11j, 100 - 72j, 80 + 58j, + -354 + 228j, 234 - 297j]) + assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j, + 108 - 54j, -81 + 108j]) + + b, a = invresz([-1, 1j], [1, 1], [1, 2]) + assert_almost_equal(b, [1j, 1, -3, 2]) + assert_almost_equal(a, [1, -2, 1]) + + def test_inverse_scalar_arguments(self): + b, a = invres(1, 1, 1) + assert_almost_equal(b, [1, 0]) + assert_almost_equal(a, [1, -1]) + + b, a = invresz(1, 1, 1) + assert_almost_equal(b, [2, -1]) + assert_almost_equal(a, [1, -1]) + + +class TestVectorstrength: + + def test_single_1dperiod(self): + events = np.array([.5]) + period = 5. + targ_strength = 1. + targ_phase = .1 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_single_2dperiod(self): + events = np.array([.5]) + period = [1, 2, 5.] + targ_strength = [1.] * 3 + targ_phase = np.array([.5, .25, .1]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_array_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_equal_1dperiod(self): + events = np.array([.25, .25, .25, .25, .25, .25]) + period = 2 + targ_strength = 1. + targ_phase = .125 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_equal_2dperiod(self): + events = np.array([.25, .25, .25, .25, .25, .25]) + period = [1, 2, ] + targ_strength = [1.] 
* 2 + targ_phase = np.array([.25, .125]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_spaced_1dperiod(self): + events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) + period = 1 + targ_strength = 1. + targ_phase = .1 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_spaced_2dperiod(self): + events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) + period = [1, .5] + targ_strength = [1.] * 2 + targ_phase = np.array([.1, .2]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_partial_1dperiod(self): + events = np.array([.25, .5, .75]) + period = 1 + targ_strength = 1. / 3. + targ_phase = .5 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_partial_2dperiod(self): + events = np.array([.25, .5, .75]) + period = [1., 1., 1., 1.] + targ_strength = [1. / 3.] * 4 + targ_phase = np.array([.5, .5, .5, .5]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_opposite_1dperiod(self): + events = np.array([0, .25, .5, .75]) + period = 1. + targ_strength = 0 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + + def test_opposite_2dperiod(self): + events = np.array([0, .25, .5, .75]) + period = [1.] * 10 + targ_strength = [0.] * 10 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + + def test_2d_events_ValueError(self): + events = np.array([[1, 2]]) + period = 1. + assert_raises(ValueError, vectorstrength, events, period) + + def test_2d_period_ValueError(self): + events = 1. + period = np.array([[1]]) + assert_raises(ValueError, vectorstrength, events, period) + + def test_zero_period_ValueError(self): + events = 1. + period = 0 + assert_raises(ValueError, vectorstrength, events, period) + + def test_negative_period_ValueError(self): + events = 1. 
+ period = -1 + assert_raises(ValueError, vectorstrength, events, period) + + +def assert_allclose_cast(actual, desired, rtol=1e-7, atol=0): + """Wrap assert_allclose while casting object arrays.""" + if actual.dtype.kind == 'O': + dtype = np.array(actual.flat[0]).dtype + actual, desired = actual.astype(dtype), desired.astype(dtype) + assert_allclose(actual, desired, rtol, atol) + + +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +@pytest.mark.parametrize('func', (sosfilt, lfilter)) +def test_nonnumeric_dtypes(func): + x = [Decimal(1), Decimal(2), Decimal(3)] + b = [Decimal(1), Decimal(2), Decimal(3)] + a = [Decimal(1), Decimal(2), Decimal(3)] + x = np.array(x) + assert x.dtype.kind == 'O' + desired = lfilter(np.array(b, float), np.array(a, float), x.astype(float)) + if func is sosfilt: + actual = sosfilt([b + a], x) + else: + actual = lfilter(b, a, x) + assert all(isinstance(x, Decimal) for x in actual) + assert_allclose(actual.astype(float), desired.astype(float)) + # Degenerate cases + if func is lfilter: + args = [1., 1.] + else: + args = [tf2sos(1., 1.)] + + with pytest.raises(ValueError, match='must be at least 1-D'): + func(*args, x=1.) + + +@pytest.mark.parametrize('dt', 'fdFD') +class TestSOSFilt: + + # The test_rank* tests are pulled from _TestLinearFilter + def test_rank1(self, dt): + x = np.linspace(0, 5, 6).astype(dt) + b = np.array([1, -1]).astype(dt) + a = np.array([0.5, -0.5]).astype(dt) + + # Test simple IIR + y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(dt) + sos = tf2sos(b, a) + assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) + + # Test simple FIR + b = np.array([1, 1]).astype(dt) + # NOTE: This was changed (rel. to TestLinear...) to add a pole @zero: + a = np.array([1, 0]).astype(dt) + y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(dt) + assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) + + b = [1, 1, 0] + a = [1, 0, 0] + x = np.ones(8) + sos = np.concatenate((b, a)) + sos.shape = (1, 6) + y = sosfilt(sos, x) + assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) + + def test_rank2(self, dt): + shape = (4, 3) + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + x = x.astype(dt) + + b = np.array([1, -1]).astype(dt) + a = np.array([0.5, 0.5]).astype(dt) + + y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], + dtype=dt) + + y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], + [18, -16, 18]], dtype=dt) + + y = sosfilt(tf2sos(b, a), x, axis=0) + assert_array_almost_equal(y_r2_a0, y) + + y = sosfilt(tf2sos(b, a), x, axis=1) + assert_array_almost_equal(y_r2_a1, y) + + def test_rank3(self, dt): + shape = (4, 3, 2) + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + + b = np.array([1, -1]).astype(dt) + a = np.array([0.5, 0.5]).astype(dt) + + # Test last axis + y = sosfilt(tf2sos(b, a), x) + for i in range(x.shape[0]): + for j in range(x.shape[1]): + assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) + + def test_initial_conditions(self, dt): + b1, a1 = signal.butter(2, 0.25, 'low') + b2, a2 = signal.butter(2, 0.75, 'low') + b3, a3 = signal.butter(2, 0.75, 'low') + b = np.convolve(np.convolve(b1, b2), b3) + a = np.convolve(np.convolve(a1, a2), a3) + sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) + + x = np.random.rand(50).astype(dt) + + # Stopping filtering and continuing + y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) + y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] + assert_allclose_cast(y_true, lfilter(b, a, x)) + + y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 
2)))
+        y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]]
+        assert_allclose_cast(y_true, y_sos)
+
+        # Use a step function
+        zi = sosfilt_zi(sos)
+        x = np.ones(8, dt)
+        y, zf = sosfilt(sos, x, zi=zi)
+
+        assert_allclose_cast(y, np.ones(8))
+        assert_allclose_cast(zf, zi)
+
+        # Initial condition shape matching
+        x.shape = (1, 1, x.size)  # 3D
+        assert_raises(ValueError, sosfilt, sos, x, zi=zi)
+        zi_nd = zi.copy()
+        zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1])
+        assert_raises(ValueError, sosfilt, sos, x,
+                      zi=zi_nd[:, :, :, [0, 1, 1]])
+        y, zf = sosfilt(sos, x, zi=zi_nd)
+        assert_allclose_cast(y[0, 0], np.ones(8))
+        assert_allclose_cast(zf[:, 0, 0, :], zi)
+
+    def test_initial_conditions_3d_axis1(self, dt):
+        # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input.
+
+        # Input array is x.
+        x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3))
+        x = x.astype(dt)
+
+        # Design a filter in ZPK format and convert to SOS
+        zpk = signal.butter(6, 0.35, output='zpk')
+        sos = zpk2sos(*zpk)
+        nsections = sos.shape[0]
+
+        # Filter along this axis.
+        axis = 1
+
+        # Initial conditions, all zeros.
+        shp = list(x.shape)
+        shp[axis] = 2
+        shp = [nsections] + shp
+        z0 = np.zeros(shp)
+
+        # Apply the filter to x.
+        yf, zf = sosfilt(sos, x, axis=axis, zi=z0)
+
+        # Apply the filter to x in two stages.
+        y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0)
+        y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1)
+
+        # y should equal yf, and z2 should equal zf.
+        y = np.concatenate((y1, y2), axis=axis)
+        assert_allclose_cast(y, yf, rtol=1e-10, atol=1e-13)
+        assert_allclose_cast(z2, zf, rtol=1e-10, atol=1e-13)
+
+        # let's try the "step" initial condition
+        zi = sosfilt_zi(sos)
+        zi.shape = [nsections, 1, 2, 1]
+        zi = zi * x[:, 0:1, :]
+        y = sosfilt(sos, x, axis=axis, zi=zi)[0]
+        # check it against the TF form
+        b, a = zpk2tf(*zpk)
+        zi = lfilter_zi(b, a)
+        zi.shape = [1, zi.size, 1]
+        zi = zi * x[:, 0:1, :]
+        y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0]
+        assert_allclose_cast(y, y_tf, rtol=1e-10, atol=1e-13)
+
+    def test_bad_zi_shape(self, dt):
+        # The shape of zi is checked before using any values in the
+        # arguments, so np.empty is fine for creating the arguments.
+        x = np.empty((3, 15, 3), dt)
+        sos = np.zeros((4, 6))
+        zi = np.empty((4, 3, 3, 2))  # Correct shape is (4, 3, 2, 3)
+        with pytest.raises(ValueError, match='should be all ones'):
+            sosfilt(sos, x, zi=zi, axis=1)
+        sos[:, 3] = 1.
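+        # With valid leading denominator coefficients (a0 = 1), the next
+        # failure should be the zi shape check rather than the a0 check.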
+ with pytest.raises(ValueError, match='Invalid zi shape'): + sosfilt(sos, x, zi=zi, axis=1) + + def test_sosfilt_zi(self, dt): + sos = signal.butter(6, 0.2, output='sos') + zi = sosfilt_zi(sos) + + y, zf = sosfilt(sos, np.ones(40, dt), zi=zi) + assert_allclose_cast(zf, zi, rtol=1e-13) + + # Expected steady state value of the step response of this filter: + ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1)) + assert_allclose_cast(y, ss, rtol=1e-13) + + # zi as array-like + _, zf = sosfilt(sos, np.ones(40, dt), zi=zi.tolist()) + assert_allclose_cast(zf, zi, rtol=1e-13) + + @pytest.mark.thread_unsafe + def test_dtype_deprecation(self, dt): + # gh-21211 + sos = np.asarray([1, 2, 3, 1, 5, 3], dtype=object).reshape(1, 6) + x = np.asarray([2, 3, 4, 5, 3, 4, 2, 2, 1], dtype=object) + with pytest.deprecated_call(match="dtype=object is not supported"): + sosfilt(sos, x) + + +class TestDeconvolve: + + def test_basic(self): + # From docstring example + original = [0, 1, 0, 0, 1, 1, 0, 0] + impulse_response = [2, 1] + recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0] + recovered, remainder = signal.deconvolve(recorded, impulse_response) + assert_allclose(recovered, original) + + def test_n_dimensional_signal(self): + recorded = [[0, 0], [0, 0]] + impulse_response = [0, 0] + with pytest.raises(ValueError, match="signal must be 1-D."): + quotient, remainder = signal.deconvolve(recorded, impulse_response) + + def test_n_dimensional_divisor(self): + recorded = [0, 0] + impulse_response = [[0, 0], [0, 0]] + with pytest.raises(ValueError, match="divisor must be 1-D."): + quotient, remainder = signal.deconvolve(recorded, impulse_response) + + +class TestDetrend: + + def test_basic(self): + detrended = detrend(array([1, 2, 3])) + detrended_exact = array([0, 0, 0]) + assert_array_almost_equal(detrended, detrended_exact) + + def test_copy(self): + x = array([1, 1.2, 1.5, 1.6, 2.4]) + copy_array = detrend(x, overwrite_data=False) + inplace = detrend(x, overwrite_data=True) + assert_array_almost_equal(copy_array, inplace) + + @pytest.mark.parametrize('kind', ['linear', 'constant']) + @pytest.mark.parametrize('axis', [0, 1, 2]) + def test_axis(self, axis, kind): + data = np.arange(5*6*7).reshape(5, 6, 7) + detrended = detrend(data, type=kind, axis=axis) + assert detrended.shape == data.shape + + def test_bp(self): + data = [0, 1, 2] + [5, 0, -5, -10] + detrended = detrend(data, type='linear', bp=3) + assert_allclose(detrended, 0, atol=1e-14) + + # repeat with ndim > 1 and axis + data = np.asarray(data)[None, :, None] + + detrended = detrend(data, type="linear", bp=3, axis=1) + assert_allclose(detrended, 0, atol=1e-14) + + # breakpoint index > shape[axis]: raises + with assert_raises(ValueError): + detrend(data, type="linear", bp=3) + + @pytest.mark.parametrize('bp', [np.array([0, 2]), [0, 2]]) + def test_detrend_array_bp(self, bp): + # regression test for https://github.com/scipy/scipy/issues/18675 + rng = np.random.RandomState(12345) + x = rng.rand(10) + # bp = np.array([0, 2]) + + res = detrend(x, bp=bp) + res_scipy_191 = np.array([-4.44089210e-16, -2.22044605e-16, + -1.11128506e-01, -1.69470553e-01, 1.14710683e-01, 6.35468419e-02, + 3.53533144e-01, -3.67877935e-02, -2.00417675e-02, -1.94362049e-01]) + + assert_allclose(res, res_scipy_191, atol=1e-14) + + +class TestUniqueRoots: + def test_real_no_repeat(self): + p = [-1.0, -0.5, 0.3, 1.2, 10.0] + unique, multiplicity = unique_roots(p) + assert_almost_equal(unique, p, decimal=15) + assert_equal(multiplicity, np.ones(len(p))) + + def test_real_repeat(self): + p 
= [-1.0, -0.95, -0.89, -0.8, 0.5, 1.0, 1.05] + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min') + assert_almost_equal(unique, [-1.0, -0.89, 0.5, 1.0], decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max') + assert_almost_equal(unique, [-0.95, -0.8, 0.5, 1.05], decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg') + assert_almost_equal(unique, [-0.975, -0.845, 0.5, 1.025], decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + def test_complex_no_repeat(self): + p = [-1.0, 1.0j, 0.5 + 0.5j, -1.0 - 1.0j, 3.0 + 2.0j] + unique, multiplicity = unique_roots(p) + assert_almost_equal(unique, p, decimal=15) + assert_equal(multiplicity, np.ones(len(p))) + + def test_complex_repeat(self): + p = [-1.0, -1.0 + 0.05j, -0.95 + 0.15j, -0.90 + 0.15j, 0.0, + 0.5 + 0.5j, 0.45 + 0.55j] + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min') + assert_almost_equal(unique, [-1.0, -0.95 + 0.15j, 0.0, 0.45 + 0.55j], + decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max') + assert_almost_equal(unique, + [-1.0 + 0.05j, -0.90 + 0.15j, 0.0, 0.5 + 0.5j], + decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg') + assert_almost_equal( + unique, [-1.0 + 0.025j, -0.925 + 0.15j, 0.0, 0.475 + 0.525j], + decimal=15) + assert_equal(multiplicity, [2, 2, 1, 2]) + + def test_gh_4915(self): + p = np.roots(np.convolve(np.ones(5), np.ones(5))) + true_roots = [-(-1)**(1/5), (-1)**(4/5), -(-1)**(3/5), (-1)**(2/5)] + + unique, multiplicity = unique_roots(p) + unique = np.sort(unique) + + assert_almost_equal(np.sort(unique), true_roots, decimal=7) + assert_equal(multiplicity, [2, 2, 2, 2]) + + def test_complex_roots_extra(self): + unique, multiplicity = unique_roots([1.0, 1.0j, 1.0]) + assert_almost_equal(unique, [1.0, 1.0j], decimal=15) + assert_equal(multiplicity, [2, 1]) + + unique, multiplicity = unique_roots([1, 1 + 2e-9, 1e-9 + 1j], tol=0.1) + assert_almost_equal(unique, [1.0, 1e-9 + 1.0j], decimal=15) + assert_equal(multiplicity, [2, 1]) + + def test_single_unique_root(self): + p = np.random.rand(100) + 1j * np.random.rand(100) + unique, multiplicity = unique_roots(p, 2) + assert_almost_equal(unique, [np.min(p)], decimal=15) + assert_equal(multiplicity, [100]) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_spectral.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..12dac6300b9ef4b2eac0475f0b53517ab4867416 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_spectral.py @@ -0,0 +1,2059 @@ +import sys + +import numpy as np +from numpy.testing import (assert_, + assert_allclose, assert_array_equal, assert_equal, + assert_array_almost_equal_nulp, suppress_warnings) +import pytest +from pytest import raises as assert_raises + +from scipy import signal +from scipy.fft import fftfreq, rfftfreq, fft, irfft +from scipy.integrate import trapezoid +from scipy.signal import (periodogram, welch, lombscargle, coherence, + spectrogram, check_COLA, check_NOLA) +from scipy.signal.windows import hann +from scipy.signal._spectral_py import _spectral_helper + +# Compare ShortTimeFFT.stft() / ShortTimeFFT.istft() with stft() / istft(): +from scipy.signal.tests._scipy_spectral_test_shim import 
stft_compare as stft +from scipy.signal.tests._scipy_spectral_test_shim import istft_compare as istft +from scipy.signal.tests._scipy_spectral_test_shim import csd_compare as csd + + +class TestPeriodogram: + def test_real_onesided_even(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_real_onesided_odd(self): + x = np.zeros(15) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.arange(8.0)/15.0) + q = np.ones(8) + q[0] = 0 + q *= 2.0/15.0 + assert_allclose(p, q, atol=1e-15) + + def test_real_twosided(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 1/16.0) + q[0] = 0 + assert_allclose(p, q) + + def test_real_spectrum(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, scaling='spectrum') + g, q = periodogram(x, scaling='density') + assert_allclose(f, np.linspace(0, 0.5, 9)) + assert_allclose(p, q/16.0) + + def test_integer_even(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_integer_odd(self): + x = np.zeros(15, dtype=int) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.arange(8.0)/15.0) + q = np.ones(8) + q[0] = 0 + q *= 2.0/15.0 + assert_allclose(p, q, atol=1e-15) + + def test_integer_twosided(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 1/16.0) + q[0] = 0 + assert_allclose(p, q) + + def test_complex(self): + x = np.zeros(16, np.complex128) + x[0] = 1.0 + 2.0j + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 5.0/16.0) + q[0] = 0 + assert_allclose(p, q) + + def test_unk_scaling(self): + assert_raises(ValueError, periodogram, np.zeros(4, np.complex128), + scaling='foo') + + @pytest.mark.skipif( + sys.maxsize <= 2**32, + reason="On some 32-bit tolerance issue" + ) + def test_nd_axis_m1(self): + x = np.zeros(20, dtype=np.float64) + x = x.reshape((2,1,10)) + x[:,:,0] = 1.0 + f, p = periodogram(x) + assert_array_equal(p.shape, (2, 1, 6)) + assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60) + f0, p0 = periodogram(x[0,0,:]) + assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60) + + @pytest.mark.skipif( + sys.maxsize <= 2**32, + reason="On some 32-bit tolerance issue" + ) + def test_nd_axis_0(self): + x = np.zeros(20, dtype=np.float64) + x = x.reshape((10,2,1)) + x[0,:,:] = 1.0 + f, p = periodogram(x, axis=0) + assert_array_equal(p.shape, (6,2,1)) + assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60) + f0, p0 = periodogram(x[:,0,0]) + assert_array_almost_equal_nulp(p0, p[:,1,0]) + + def test_window_external(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, 10, 'hann') + win = signal.get_window('hann', 16) + fe, pe = periodogram(x, 10, win) + assert_array_almost_equal_nulp(p, pe) + assert_array_almost_equal_nulp(f, fe) + win_err = signal.get_window('hann', 32) + assert_raises(ValueError, periodogram, x, + 10, win_err) # win longer than signal + + def test_padded_fft(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x) + fp, pp = periodogram(x, nfft=32) + assert_allclose(f, fp[::2]) + assert_allclose(p, pp[::2]) + assert_array_equal(pp.shape, (17,)) + + def test_empty_input(self): + f, p = 
periodogram([]) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + for shape in [(0,), (3,0), (0,5,2)]: + f, p = periodogram(np.empty(shape)) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_empty_input_other_axis(self): + for shape in [(3,0), (0,5,2)]: + f, p = periodogram(np.empty(shape), axis=1) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_short_nfft(self): + x = np.zeros(18) + x[0] = 1 + f, p = periodogram(x, nfft=16) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_nfft_is_xshape(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, nfft=16) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_real_onesided_even_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9, 'f') + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + assert_(p.dtype == q.dtype) + + def test_real_onesided_odd_32(self): + x = np.zeros(15, 'f') + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.arange(8.0)/15.0) + q = np.ones(8, 'f') + q[0] = 0 + q *= 2.0/15.0 + assert_allclose(p, q, atol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_twosided_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 1/16.0, 'f') + q[0] = 0 + assert_allclose(p, q) + assert_(p.dtype == q.dtype) + + def test_complex_32(self): + x = np.zeros(16, 'F') + x[0] = 1.0 + 2.0j + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftfreq(16, 1.0)) + q = np.full(16, 5.0/16.0, 'f') + q[0] = 0 + assert_allclose(p, q) + assert_(p.dtype == q.dtype) + + def test_shorter_window_error(self): + x = np.zeros(16) + x[0] = 1 + win = signal.get_window('hann', 10) + expected_msg = ('the size of the window must be the same size ' + 'of the input on the specified axis') + with assert_raises(ValueError, match=expected_msg): + periodogram(x, window=win) + + +class TestWelch: + def test_real_onesided_even(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_onesided_odd(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_twosided(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_spectrum(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, scaling='spectrum') + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667, + 0.02083333]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_even(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + 
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_odd(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_twosided(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_complex(self): + x = np.zeros(16, np.complex128) + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556, + 0.55555556, 0.55555556, 0.55555556, 0.38194444]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_unk_scaling(self): + assert_raises(ValueError, welch, np.zeros(4, np.complex128), + scaling='foo', nperseg=4) + + def test_detrend_linear(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = welch(x, nperseg=10, detrend='linear') + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_no_detrending(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f1, p1 = welch(x, nperseg=10, detrend=False) + f2, p2 = welch(x, nperseg=10, detrend=lambda x: x) + assert_allclose(f1, f2, atol=1e-15) + assert_allclose(p1, p2, atol=1e-15) + + def test_detrend_external(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = welch(x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_m1(self): + x = np.arange(40, dtype=np.float64) + 0.04 + x = x.reshape((2,2,10)) + f, p = welch(x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + x = np.moveaxis(x, 2, 0) + f, p = welch(x, nperseg=10, axis=0, + detrend=lambda seg: signal.detrend(seg, axis=0, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_nd_axis_m1(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + f, p = welch(x, nperseg=10) + assert_array_equal(p.shape, (2, 1, 6)) + assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13) + f0, p0 = welch(x[0,0,:], nperseg=10) + assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13) + + def test_nd_axis_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((10,2,1)) + f, p = welch(x, nperseg=10, axis=0) + assert_array_equal(p.shape, (6,2,1)) + assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13) + f0, p0 = welch(x[:,0,0], nperseg=10) + assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13) + + def test_window_external(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, 10, 'hann', nperseg=8) + win = signal.get_window('hann', 8) + fe, pe = welch(x, 10, win, nperseg=None) + assert_array_almost_equal_nulp(p, pe) + assert_array_almost_equal_nulp(f, fe) + assert_array_equal(fe.shape, (5,)) # because win length used as nperseg + assert_array_equal(pe.shape, (5,)) + assert_raises(ValueError, welch, x, + 10, win, nperseg=4) # because 
nperseg != win.shape[-1] + win_err = signal.get_window('hann', 32) + assert_raises(ValueError, welch, x, + 10, win_err, nperseg=None) # win longer than signal + + def test_empty_input(self): + f, p = welch([]) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + for shape in [(0,), (3,0), (0,5,2)]: + f, p = welch(np.empty(shape)) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_empty_input_other_axis(self): + for shape in [(3,0), (0,5,2)]: + f, p = welch(np.empty(shape), axis=1) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_short_data(self): + x = np.zeros(8) + x[0] = 1 + #for string-like window, input signal length < nperseg value gives + #UserWarning, sets nperseg to x.shape[-1] + with suppress_warnings() as sup: + msg = "nperseg = 256 is greater than input length = 8, using nperseg = 8" + sup.filter(UserWarning, msg) + f, p = welch(x,window='hann') # default nperseg + f1, p1 = welch(x,window='hann', nperseg=256) # user-specified nperseg + f2, p2 = welch(x, nperseg=8) # valid nperseg, doesn't give warning + assert_allclose(f, f2) + assert_allclose(p, p2) + assert_allclose(f1, f2) + assert_allclose(p1, p2) + + def test_window_long_or_nd(self): + assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1])) + assert_raises(ValueError, welch, np.zeros(4), 1, + np.arange(6).reshape((2,3))) + + def test_nondefault_noverlap(self): + x = np.zeros(64) + x[::8] = 1 + f, p = welch(x, nperseg=16, noverlap=4) + q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5., + 1./6.]) + assert_allclose(p, q, atol=1e-12) + + def test_bad_noverlap(self): + assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7) + + def test_nfft_too_short(self): + assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4) + + def test_real_onesided_even_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_onesided_odd_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116, + 0.17072113], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_twosided_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.11111111, + 0.07638889], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_complex_32(self): + x = np.zeros(16, 'F') + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552, + 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype, + f'dtype mismatch, {p.dtype}, {q.dtype}') + + def test_padded_freqs(self): + x = np.zeros(12) + + nfft = 24 + f = fftfreq(nfft, 1.0)[:nfft//2+1] + f[-1] *= -1 + fodd, _ = welch(x, nperseg=5, nfft=nfft) + feven, _ = welch(x, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + nfft = 25 
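+        # Odd nfft: the one-sided result has (nfft + 1)//2 bins and no
+        # Nyquist bin, so no sign fix-up of the last frequency is needed here.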
+ f = fftfreq(nfft, 1.0)[:(nfft + 1)//2] + fodd, _ = welch(x, nperseg=5, nfft=nfft) + feven, _ = welch(x, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + def test_window_correction(self): + A = 20 + fs = 1e4 + nperseg = int(fs//10) + fsig = 300 + ii = int(fsig*nperseg//fs) # Freq index of fsig + + tt = np.arange(fs)/fs + x = A*np.sin(2*np.pi*fsig*tt) + + for window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']: + _, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window, + scaling='spectrum') + freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window, + scaling='density') + + # Check peak height at signal frequency for 'spectrum' + assert_allclose(p_spec[ii], A**2/2.0) + # Check integrated spectrum RMS for 'density' + assert_allclose(np.sqrt(trapezoid(p_dens, freq)), A*np.sqrt(2)/2, + rtol=1e-3) + + def test_axis_rolling(self): + np.random.seed(1234) + + x_flat = np.random.randn(1024) + _, p_flat = welch(x_flat) + + for a in range(3): + newshape = [1,]*3 + newshape[a] = -1 + x = x_flat.reshape(newshape) + + _, p_plus = welch(x, axis=a) # Positive axis index + _, p_minus = welch(x, axis=a-x.ndim) # Negative axis index + + assert_equal(p_flat, p_plus.squeeze(), err_msg=a) + assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim) + + def test_average(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, average='median') + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([.1, .05, 0., 1.54074396e-33, 0.]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + assert_raises(ValueError, welch, x, nperseg=8, + average='unrecognised-average') + + +class TestCSD: + def test_pad_shorter_x(self): + x = np.zeros(8) + y = np.zeros(12) + + f = np.linspace(0, 0.5, 7) + c = np.zeros(7,dtype=np.complex128) + f1, c1 = csd(x, y, nperseg=12) + + assert_allclose(f, f1) + assert_allclose(c, c1) + + def test_pad_shorter_y(self): + x = np.zeros(12) + y = np.zeros(8) + + f = np.linspace(0, 0.5, 7) + c = np.zeros(7,dtype=np.complex128) + f1, c1 = csd(x, y, nperseg=12) + + assert_allclose(f, f1) + assert_allclose(c, c1) + + def test_real_onesided_even(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_onesided_odd(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_twosided(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_spectrum(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, scaling='spectrum') + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667, + 0.02083333]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_even(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, 
atol=1e-7, rtol=1e-7) + + def test_integer_onesided_odd(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_twosided(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_complex(self): + x = np.zeros(16, np.complex128) + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556, + 0.55555556, 0.55555556, 0.55555556, 0.38194444]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_unk_scaling(self): + assert_raises(ValueError, csd, np.zeros(4, np.complex128), + np.ones(4, np.complex128), scaling='foo', nperseg=4) + + def test_detrend_linear(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = csd(x, x, nperseg=10, detrend='linear') + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_no_detrending(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f1, p1 = csd(x, x, nperseg=10, detrend=False) + f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x) + assert_allclose(f1, f2, atol=1e-15) + assert_allclose(p1, p2, atol=1e-15) + + def test_detrend_external(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = csd(x, x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_m1(self): + x = np.arange(40, dtype=np.float64) + 0.04 + x = x.reshape((2,2,10)) + f, p = csd(x, x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + x = np.moveaxis(x, 2, 0) + f, p = csd(x, x, nperseg=10, axis=0, + detrend=lambda seg: signal.detrend(seg, axis=0, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_nd_axis_m1(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + f, p = csd(x, x, nperseg=10) + assert_array_equal(p.shape, (2, 1, 6)) + assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13) + f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10) + assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13) + + def test_nd_axis_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((10,2,1)) + f, p = csd(x, x, nperseg=10, axis=0) + assert_array_equal(p.shape, (6,2,1)) + assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13) + f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10) + assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13) + + def test_window_external(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, 10, 'hann', 8) + win = signal.get_window('hann', 8) + fe, pe = csd(x, x, 10, win, nperseg=None) + assert_array_almost_equal_nulp(p, pe) + assert_array_almost_equal_nulp(f, fe) + assert_array_equal(fe.shape, (5,)) # because win length used as nperseg + assert_array_equal(pe.shape, (5,)) + assert_raises(ValueError, csd, x, x, + 10, win, nperseg=256) # because nperseg != win.shape[-1] + win_err = 
signal.get_window('hann', 32) + assert_raises(ValueError, csd, x, x, + 10, win_err, nperseg=None) # because win longer than signal + + def test_empty_input(self): + f, p = csd([],np.zeros(10)) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + + f, p = csd(np.zeros(10),[]) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + + for shape in [(0,), (3,0), (0,5,2)]: + f, p = csd(np.empty(shape), np.empty(shape)) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + f, p = csd(np.ones(10), np.empty((5,0))) + assert_array_equal(f.shape, (5,0)) + assert_array_equal(p.shape, (5,0)) + + f, p = csd(np.empty((5,0)), np.ones(10)) + assert_array_equal(f.shape, (5,0)) + assert_array_equal(p.shape, (5,0)) + + def test_empty_input_other_axis(self): + for shape in [(3,0), (0,5,2)]: + f, p = csd(np.empty(shape), np.empty(shape), axis=1) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1) + assert_array_equal(f.shape, (10,0,3)) + assert_array_equal(p.shape, (10,0,3)) + + f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1) + assert_array_equal(f.shape, (10,0,3)) + assert_array_equal(p.shape, (10,0,3)) + + def test_short_data(self): + x = np.zeros(8) + x[0] = 1 + + #for string-like window, input signal length < nperseg value gives + #UserWarning, sets nperseg to x.shape[-1] + with suppress_warnings() as sup: + msg = "nperseg = 256 is greater than input length = 8, using nperseg = 8" + sup.filter(UserWarning, msg) + f, p = csd(x, x, window='hann') # default nperseg + f1, p1 = csd(x, x, window='hann', nperseg=256) # user-specified nperseg + f2, p2 = csd(x, x, nperseg=8) # valid nperseg, doesn't give warning + assert_allclose(f, f2) + assert_allclose(p, p2) + assert_allclose(f1, f2) + assert_allclose(p1, p2) + + def test_window_long_or_nd(self): + assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, + np.array([1,1,1,1,1])) + assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, + np.arange(6).reshape((2,3))) + + def test_nondefault_noverlap(self): + x = np.zeros(64) + x[::8] = 1 + f, p = csd(x, x, nperseg=16, noverlap=4) + q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5., + 1./6.]) + assert_allclose(p, q, atol=1e-12) + + def test_bad_noverlap(self): + assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hann', + 2, 7) + + def test_nfft_too_short(self): + assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3, + nperseg=4) + + def test_real_onesided_even_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_onesided_odd_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116, + 0.17072113], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_twosided_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.11111111, + 0.07638889], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == 
q.dtype) + + def test_complex_32(self): + x = np.zeros(16, 'F') + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftfreq(8, 1.0)) + q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552, + 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype, + f'dtype mismatch, {p.dtype}, {q.dtype}') + + def test_padded_freqs(self): + x = np.zeros(12) + y = np.ones(12) + + nfft = 24 + f = fftfreq(nfft, 1.0)[:nfft//2+1] + f[-1] *= -1 + fodd, _ = csd(x, y, nperseg=5, nfft=nfft) + feven, _ = csd(x, y, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + nfft = 25 + f = fftfreq(nfft, 1.0)[:(nfft + 1)//2] + fodd, _ = csd(x, y, nperseg=5, nfft=nfft) + feven, _ = csd(x, y, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + def test_copied_data(self): + x = np.random.randn(64) + y = x.copy() + + _, p_same = csd(x, x, nperseg=8, average='mean', + return_onesided=False) + _, p_copied = csd(x, y, nperseg=8, average='mean', + return_onesided=False) + assert_allclose(p_same, p_copied) + + _, p_same = csd(x, x, nperseg=8, average='median', + return_onesided=False) + _, p_copied = csd(x, y, nperseg=8, average='median', + return_onesided=False) + assert_allclose(p_same, p_copied) + + +class TestCoherence: + def test_identical_input(self): + x = np.random.randn(20) + y = np.copy(x) # So `y is x` -> False + + f = np.linspace(0, 0.5, 6) + C = np.ones(6) + f1, C1 = coherence(x, y, nperseg=10) + + assert_allclose(f, f1) + assert_allclose(C, C1) + + def test_phase_shifted_input(self): + x = np.random.randn(20) + y = -x + + f = np.linspace(0, 0.5, 6) + C = np.ones(6) + f1, C1 = coherence(x, y, nperseg=10) + + assert_allclose(f, f1) + assert_allclose(C, C1) + + +class TestSpectrogram: + def test_average_all_segments(self): + x = np.random.randn(1024) + + fs = 1.0 + window = ('tukey', 0.25) + nperseg = 16 + noverlap = 2 + + f, _, P = spectrogram(x, fs, window, nperseg, noverlap) + fw, Pw = welch(x, fs, window, nperseg, noverlap) + assert_allclose(f, fw) + assert_allclose(np.mean(P, axis=-1), Pw) + + def test_window_external(self): + x = np.random.randn(1024) + + fs = 1.0 + window = ('tukey', 0.25) + nperseg = 16 + noverlap = 2 + f, _, P = spectrogram(x, fs, window, nperseg, noverlap) + + win = signal.get_window(('tukey', 0.25), 16) + fe, _, Pe = spectrogram(x, fs, win, nperseg=None, noverlap=2) + assert_array_equal(fe.shape, (9,)) # because win length used as nperseg + assert_array_equal(Pe.shape, (9,73)) + assert_raises(ValueError, spectrogram, x, + fs, win, nperseg=8) # because nperseg != win.shape[-1] + win_err = signal.get_window(('tukey', 0.25), 2048) + assert_raises(ValueError, spectrogram, x, + fs, win_err, nperseg=None) # win longer than signal + + def test_short_data(self): + x = np.random.randn(1024) + fs = 1.0 + + #for string-like window, input signal length < nperseg value gives + #UserWarning, sets nperseg to x.shape[-1] + f, _, p = spectrogram(x, fs, window=('tukey',0.25)) # default nperseg + with suppress_warnings() as sup: + sup.filter(UserWarning, + "nperseg = 1025 is greater than input length = 1024, " + "using nperseg = 1024",) + f1, _, p1 = spectrogram(x, fs, window=('tukey',0.25), + nperseg=1025) # user-specified nperseg + f2, _, p2 = spectrogram(x, fs, nperseg=256) # to compare w/default + f3, _, p3 = spectrogram(x, fs, nperseg=1024) # compare w/user-spec'd + assert_allclose(f, f2) + assert_allclose(p, p2) 
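+        # nperseg=1025 falls back to the 1024-sample input length, so the
+        # result should match the explicit nperseg=1024 call.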
+ assert_allclose(f1, f3) + assert_allclose(p1, p3) + +class TestLombscargle: + def test_frequency(self): + """Test if frequency location of peak corresponds to frequency of + generated input signal. + """ + + # Input parameters + ampl = 2. + w = 1. + phi = 0.5 * np.pi + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + + # Randomly select a fraction of an array with timesteps + rng = np.random.RandomState(2353425) + r = rng.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + y = ampl * np.sin(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + P = lombscargle(t, y, f) + + # Check if difference between found frequency maximum and input + # frequency is less than accuracy + delta = f[1] - f[0] + assert(w - f[np.argmax(P)] < (delta/2.)) + + # also, check that it works with weights + P = lombscargle(t, y, f, weights=np.ones_like(t, dtype=f.dtype)) + + # Check if difference between found frequency maximum and input + # frequency is less than accuracy + delta = f[1] - f[0] + assert(w - f[np.argmax(P)] < (delta/2.)) + + + def test_amplitude(self): + # Test if height of peak in unnormalized Lomb-Scargle periodogram + # corresponds to amplitude of the generated input signal. + + # Input parameters + ampl = 2. + w = 1. + phi = 0.5 * np.pi + nin = 1000 + nout = 1000 + p = 0.7 # Fraction of points to select + + # Randomly select a fraction of an array with timesteps + rng = np.random.RandomState(2353425) + r = rng.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + y = ampl * np.sin(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, y, f) + + # convert to the amplitude + pgram = np.sqrt(4.0 * pgram / t.shape[0]) + + # Check if amplitude is correct (this will not exactly match, due to + # numerical differences when data is removed) + assert_allclose(pgram[f==w], ampl, rtol=5e-2) + + def test_precenter(self): + # Test if precenter gives the same result as manually precentering + # (for a very simple offset) + + # Input parameters + ampl = 2. + w = 1. + phi = 0.5 * np.pi + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + offset = 0.15 # Offset to be subtracted in pre-centering + + # Randomly select a fraction of an array with timesteps + rng = np.random.RandomState(2353425) + r = rng.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + y = ampl * np.sin(w*t + phi) + offset + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, y, f, precenter=True) + pgram2 = lombscargle(t, y - y.mean(), f, precenter=False) + + # check if centering worked + assert_allclose(pgram, pgram2) + + # do this again, but with floating_mean=True + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, y, f, precenter=True, floating_mean=True) + pgram2 = lombscargle(t, y - y.mean(), f, precenter=False, floating_mean=True) + + # check if centering worked + assert_allclose(pgram, pgram2) + + def test_normalize(self): + # Test normalize option of Lomb-Scarge. + + # Input parameters + ampl = 2. + w = 1. 
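+        # w is the angular frequency (rad per unit time) of the test sinusoid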
+ phi = 0.5 * np.pi + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + + # Randomly select a fraction of an array with timesteps + rng = np.random.RandomState(2353425) + r = rng.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + y = ampl * np.sin(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, y, f) + pgram2 = lombscargle(t, y, f, normalize=True) + + # Calculate the scale to convert from unnormalized to normalized + weights = np.ones_like(t)/float(t.shape[0]) + YY_hat = (weights * y * y).sum() + YY = YY_hat # correct formula for floating_mean=False + scale_to_use = 2/(YY*t.shape[0]) + + # check if normalization works as expected + assert_allclose(pgram * scale_to_use, pgram2) + assert_allclose(np.max(pgram2), 1.0) + + def test_wrong_shape(self): + + # different length t and y + t = np.linspace(0, 1, 1) + y = np.linspace(0, 1, 2) + f = np.linspace(0, 1, 3) + 0.1 + assert_raises(ValueError, lombscargle, t, y, f) + + # t is 2D, with both axes length > 1 + t = np.repeat(np.expand_dims(np.linspace(0, 1, 2), 1), 2, axis=1) + y = np.linspace(0, 1, 2) + f = np.linspace(0, 1, 3) + 0.1 + assert_raises(ValueError, lombscargle, t, y, f) + + # y is 2D, with both axes length > 1 + t = np.linspace(0, 1, 2) + y = np.repeat(np.expand_dims(np.linspace(0, 1, 2), 1), 2, axis=1) + f = np.linspace(0, 1, 3) + 0.1 + assert_raises(ValueError, lombscargle, t, y, f) + + # f is 2D, with both axes length > 1 + t = np.linspace(0, 1, 2) + y = np.linspace(0, 1, 2) + f = np.repeat(np.expand_dims(np.linspace(0, 1, 3), 1) + 0.1, 2, axis=1) + assert_raises(ValueError, lombscargle, t, y, f) + + # weights is 2D, with both axes length > 1 + t = np.linspace(0, 1, 2) + y = np.linspace(0, 1, 2) + f = np.linspace(0, 1, 3) + 0.1 + weights = np.repeat(np.expand_dims(np.linspace(0, 1, 2), 1), 2, axis=1) + assert_raises(ValueError, lombscargle, t, y, f, weights=weights) + + def test_lombscargle_atan_vs_atan2(self): + # https://github.com/scipy/scipy/issues/3787 + # This raised a ZeroDivisionError. + t = np.linspace(0, 10, 1000, endpoint=False) + y = np.sin(4*t) + f = np.linspace(0, 50, 500, endpoint=False) + 0.1 + lombscargle(t, y, f*2*np.pi) + + def test_wrong_shape_weights(self): + # Weights must be the same shape as t + + t = np.linspace(0, 1, 1) + y = np.linspace(0, 1, 1) + f = np.linspace(0, 1, 3) + 0.1 + weights = np.linspace(1, 2, 2) + assert_raises(ValueError, lombscargle, t, y, f, weights=weights) + + def test_zero_division_weights(self): + # Weights cannot sum to 0 + + t = np.zeros(1) + y = np.zeros(1) + f = np.ones(1) + weights = np.zeros(1) + assert_raises(ValueError, lombscargle, t, y, f, weights=weights) + + def test_normalize_parameter(self): + # Test the validity of the normalize parameter input + + # Input parameters + ampl = 2. + w = 1. 
+ phi = 0 + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + + # Randomly select a fraction of an array with timesteps + rng = np.random.RandomState(2353425) + r = rng.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + y = ampl * np.sin(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # check each of the valid inputs + pgram_false = lombscargle(t, y, f, normalize=False) + pgram_true = lombscargle(t, y, f, normalize=True) + pgram_power = lombscargle(t, y, f, normalize='power') + pgram_norm = lombscargle(t, y, f, normalize='normalize') + pgram_amp = lombscargle(t, y, f, normalize='amplitude') + + # validate the results that should be the same + assert_allclose(pgram_false, pgram_power) + assert_allclose(pgram_true, pgram_norm) + + # validate that the power and norm outputs are proper wrt each other + weights = np.ones_like(y)/float(y.shape[0]) + YY_hat = (weights * y * y).sum() + YY = YY_hat # correct formula for floating_mean=False + assert_allclose(pgram_power * 2.0 / (float(t.shape[0]) * YY), pgram_norm) + + # validate that the amp output is correct for the given input + f_i = np.where(f==w)[0][0] + assert_allclose(np.abs(pgram_amp[f_i]), ampl) + + # check invalid inputs + # 1) a string that is not allowed + assert_raises(ValueError, lombscargle, t, y, f, normalize='lomb') + # 2) something besides a bool or str + assert_raises(ValueError, lombscargle, t, y, f, normalize=2) + + def test_offset_removal(self): + # Verify that the amplitude is the same, even with an offset + # must use floating_mean=True, otherwise it will not remove an offset + + # Input parameters + ampl = 2. + w = 1. + phi = 0.5 * np.pi + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + offset = 2.15 # Large offset + + # Randomly select a fraction of an array with timesteps + rng = np.random.RandomState(2353425) + r = rng.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + y = ampl * np.sin(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, y, f, floating_mean=True) + pgram_offset = lombscargle(t, y + offset, f, floating_mean=True) + + # check if offset removal works as expected + assert_allclose(pgram, pgram_offset) + + def test_floating_mean_false(self): + # Verify that when disabling the floating_mean, the calculations are correct + + # Input parameters + ampl = 2. + w = 1. 
+ phi = 0 + nin = 1000 + nout = 1000 + p = 0.7 # Fraction of points to select + offset = 2 # Large offset + + # Randomly select a fraction of an array with timesteps + rng = np.random.RandomState(2353425) + r = rng.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a cos wave for the selected times + y = ampl * np.cos(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, y, f, normalize=True, floating_mean=False) + pgram_offset = lombscargle(t, y + offset, f, normalize=True, + floating_mean=False) + + # check if disabling floating_mean works as expected + # nearly-zero for no offset, exact value will change based on seed + assert(pgram[0] < 0.01) + # significant value with offset, exact value will change based on seed + assert(pgram_offset[0] > 0.5) + + def test_amplitude_is_correct(self): + # Verify that the amplitude is correct (when normalize='amplitude') + + # Input parameters + ampl = 2. + w = 1. + phi = 0.12 + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + offset = 2.15 # Large offset + + # Randomly select a fraction of an array with timesteps + rng = np.random.RandomState(2353425) + r = rng.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + y = ampl * np.cos(w*t + phi) + offset + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Get the index of where the exact result should be + f_indx = np.where(f==w)[0][0] + + # Calculate Lomb-Scargle periodogram (amplitude + phase) + pgram = lombscargle(t, y, f, normalize='amplitude', floating_mean=True) + + # Check if amplitude is correct + assert_allclose(np.abs(pgram[f_indx]), ampl) + + # Check if phase is correct + # (phase angle is the negative of the phase offset) + assert_allclose(-np.angle(pgram[f_indx]), phi) + + def test_negative_weight(self): + # Test that a negative weight produces an error + + t = np.zeros(1) + y = np.zeros(1) + f = np.ones(1) + weights = -np.ones(1) + assert_raises(ValueError, lombscargle, t, y, f, weights=weights) + + def test_list_input(self): + # Test that input can be passsed in as lists and with a numerical issue + # https://github.com/scipy/scipy/issues/8787 + + t = [1.98201652e+09, 1.98201752e+09, 1.98201852e+09, 1.98201952e+09, + 1.98202052e+09, 1.98202152e+09, 1.98202252e+09, 1.98202352e+09, + 1.98202452e+09, 1.98202552e+09, 1.98202652e+09, 1.98202752e+09, + 1.98202852e+09, 1.98202952e+09, 1.98203052e+09, 1.98203152e+09, + 1.98203252e+09, 1.98203352e+09, 1.98203452e+09, 1.98203552e+09, + 1.98205452e+09, 1.98205552e+09, 1.98205652e+09, 1.98205752e+09, + 1.98205852e+09, 1.98205952e+09, 1.98206052e+09, 1.98206152e+09, + 1.98206252e+09, 1.98206352e+09, 1.98206452e+09, 1.98206552e+09, + 1.98206652e+09, 1.98206752e+09, 1.98206852e+09, 1.98206952e+09, + 1.98207052e+09, 1.98207152e+09, 1.98207252e+09, 1.98207352e+09, + 1.98209652e+09, 1.98209752e+09, 1.98209852e+09, 1.98209952e+09, + 1.98210052e+09, 1.98210152e+09, 1.98210252e+09, 1.98210352e+09, + 1.98210452e+09, 1.98210552e+09, 1.98210652e+09, 1.98210752e+09, + 1.98210852e+09, 1.98210952e+09, 1.98211052e+09, 1.98211152e+09, + 1.98211252e+09, 1.98211352e+09, 1.98211452e+09, 1.98211552e+09, + 1.98217252e+09, 1.98217352e+09, 1.98217452e+09, 1.98217552e+09, + 1.98217652e+09, 1.98217752e+09, 1.98217852e+09, 1.98217952e+09, + 1.98218052e+09, 1.98218152e+09, 
1.98218252e+09, 1.98218352e+09, + 1.98218452e+09, 1.98218552e+09, 1.98218652e+09, 1.98218752e+09, + 1.98218852e+09, 1.98218952e+09, 1.98219052e+09, 1.98219152e+09, + 1.98219352e+09, 1.98219452e+09, 1.98219552e+09, 1.98219652e+09, + 1.98219752e+09, 1.98219852e+09, 1.98219952e+09, 1.98220052e+09, + 1.98220152e+09, 1.98220252e+09, 1.98220352e+09, 1.98220452e+09, + 1.98220552e+09, 1.98220652e+09, 1.98220752e+09, 1.98220852e+09, + 1.98220952e+09, 1.98221052e+09, 1.98221152e+09, 1.98221252e+09, + 1.98222752e+09, 1.98222852e+09, 1.98222952e+09, 1.98223052e+09, + 1.98223152e+09, 1.98223252e+09, 1.98223352e+09, 1.98223452e+09, + 1.98223552e+09, 1.98223652e+09, 1.98223752e+09, 1.98223852e+09, + 1.98223952e+09, 1.98224052e+09, 1.98224152e+09, 1.98224252e+09, + 1.98224352e+09, 1.98224452e+09, 1.98224552e+09, 1.98224652e+09, + 1.98224752e+09] + y = [2.97600000e+03, 3.18200000e+03, 3.74900000e+03, 4.53500000e+03, + 5.43300000e+03, 6.38000000e+03, 7.34000000e+03, 8.29200000e+03, + 9.21900000e+03, 1.01120000e+04, 1.09620000e+04, 1.17600000e+04, + 1.25010000e+04, 1.31790000e+04, 1.37900000e+04, 1.43290000e+04, + 1.47940000e+04, 1.51800000e+04, 1.54870000e+04, 1.57110000e+04, + 5.74200000e+03, 4.82300000e+03, 3.99100000e+03, 3.33600000e+03, + 2.99600000e+03, 3.08400000e+03, 3.56700000e+03, 4.30700000e+03, + 5.18200000e+03, 6.11900000e+03, 7.07900000e+03, 8.03400000e+03, + 8.97000000e+03, 9.87300000e+03, 1.07350000e+04, 1.15480000e+04, + 1.23050000e+04, 1.30010000e+04, 1.36300000e+04, 1.41890000e+04, + 6.00000000e+03, 5.06800000e+03, 4.20500000e+03, 3.49000000e+03, + 3.04900000e+03, 3.01600000e+03, 3.40400000e+03, 4.08800000e+03, + 4.93500000e+03, 5.86000000e+03, 6.81700000e+03, 7.77500000e+03, + 8.71800000e+03, 9.63100000e+03, 1.05050000e+04, 1.13320000e+04, + 1.21050000e+04, 1.28170000e+04, 1.34660000e+04, 1.40440000e+04, + 1.32730000e+04, 1.26040000e+04, 1.18720000e+04, 1.10820000e+04, + 1.02400000e+04, 9.35300000e+03, 8.43000000e+03, 7.48100000e+03, + 6.52100000e+03, 5.57000000e+03, 4.66200000e+03, 3.85400000e+03, + 3.24600000e+03, 2.97900000e+03, 3.14700000e+03, 3.68800000e+03, + 4.45900000e+03, 5.35000000e+03, 6.29400000e+03, 7.25400000e+03, + 9.13800000e+03, 1.00340000e+04, 1.08880000e+04, 1.16910000e+04, + 1.24370000e+04, 1.31210000e+04, 1.37380000e+04, 1.42840000e+04, + 1.47550000e+04, 1.51490000e+04, 1.54630000e+04, 1.56950000e+04, + 1.58430000e+04, 1.59070000e+04, 1.58860000e+04, 1.57800000e+04, + 1.55910000e+04, 1.53190000e+04, 1.49650000e+04, 1.45330000e+04, + 3.01000000e+03, 3.05900000e+03, 3.51200000e+03, 4.23400000e+03, + 5.10000000e+03, 6.03400000e+03, 6.99300000e+03, 7.95000000e+03, + 8.88800000e+03, 9.79400000e+03, 1.06600000e+04, 1.14770000e+04, + 1.22400000e+04, 1.29410000e+04, 1.35770000e+04, 1.41430000e+04, + 1.46350000e+04, 1.50500000e+04, 1.53850000e+04, 1.56400000e+04, + 1.58110000e+04] + + periods = np.linspace(400, 120, 1000) + angular_freq = 2 * np.pi / periods + + lombscargle(t, y, angular_freq, precenter=True, normalize=True) + + def test_zero_freq(self): + # Verify that function works when freqs includes 0 + # The value at f=0 will depend on the seed + + # Input parameters + ampl = 2. + w = 1. 
+ phi = 0.12 + nin = 100 + nout = 1001 + p = 0.7 # Fraction of points to select + offset = 0 + + # Randomly select a fraction of an array with timesteps + rng = np.random.RandomState(2353425) + r = rng.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + y = ampl * np.cos(w*t + phi) + offset + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0, 10., nout) + + # Calculate Lomb-Scargle periodogram + pgram = lombscargle(t, y, f, normalize=True, floating_mean=True) + + # exact value will change based on seed + # testing to make sure it is very small + assert(pgram[0] < 1e-4) + + def test_simple_div_zero(self): + # these are bare-minimum examples that would, without the eps adjustments, + # cause division-by-zero errors + + # first, test with example that will cause first SS sum to be 0.0 + t = [t + 1 for t in range(0, 32)] + y = np.ones(len(t)) + freqs = [2.0*np.pi] * 2 # must have 2+ elements + lombscargle(t, y, freqs) + + # second, test with example that will cause first CC sum to be 0.0 + t = [t*4 + 1 for t in range(0, 32)] + y = np.ones(len(t)) + freqs = [np.pi/2.0] * 2 # must have 2+ elements + + lombscargle(t, y, freqs) + + +class TestSTFT: + @pytest.mark.thread_unsafe + def test_input_validation(self): + + def chk_VE(match): + """Assert for a ValueError matching regexp `match`. + + This little wrapper allows a more concise code layout. + """ + return pytest.raises(ValueError, match=match) + + # Checks for check_COLA(): + with chk_VE('nperseg must be a positive integer'): + check_COLA('hann', -10, 0) + with chk_VE('noverlap must be less than nperseg.'): + check_COLA('hann', 10, 20) + with chk_VE('window must be 1-D'): + check_COLA(np.ones((2, 2)), 10, 0) + with chk_VE('window must have length of nperseg'): + check_COLA(np.ones(20), 10, 0) + + # Checks for check_NOLA(): + with chk_VE('nperseg must be a positive integer'): + check_NOLA('hann', -10, 0) + with chk_VE('noverlap must be less than nperseg'): + check_NOLA('hann', 10, 20) + with chk_VE('window must be 1-D'): + check_NOLA(np.ones((2, 2)), 10, 0) + with chk_VE('window must have length of nperseg'): + check_NOLA(np.ones(20), 10, 0) + with chk_VE('noverlap must be a nonnegative integer'): + check_NOLA('hann', 64, -32) + + x = np.zeros(1024) + z = stft(x)[2] + + # Checks for stft(): + with chk_VE('window must be 1-D'): + stft(x, window=np.ones((2, 2))) + with chk_VE('value specified for nperseg is different ' + + 'from length of window'): + stft(x, window=np.ones(10), nperseg=256) + with chk_VE('nperseg must be a positive integer'): + stft(x, nperseg=-256) + with chk_VE('noverlap must be less than nperseg.'): + stft(x, nperseg=256, noverlap=1024) + with chk_VE('nfft must be greater than or equal to nperseg.'): + stft(x, nperseg=256, nfft=8) + + # Checks for istft(): + with chk_VE('Input stft must be at least 2d!'): + istft(x) + with chk_VE('window must be 1-D'): + istft(z, window=np.ones((2, 2))) + with chk_VE('window must have length of 256'): + istft(z, window=np.ones(10), nperseg=256) + with chk_VE('nperseg must be a positive integer'): + istft(z, nperseg=-256) + with chk_VE('noverlap must be less than nperseg.'): + istft(z, nperseg=256, noverlap=1024) + with chk_VE('nfft must be greater than or equal to nperseg.'): + istft(z, nperseg=256, nfft=8) + with pytest.warns(UserWarning, match="NOLA condition failed, " + + "STFT may not be invertible"): + istft(z, nperseg=256, noverlap=0, window='hann') + with chk_VE('Must specify differing 
time and frequency axes!'): + istft(z, time_axis=0, freq_axis=0) + + # Checks for _spectral_helper(): + with chk_VE("Unknown value for mode foo, must be one of: " + + r"\{'psd', 'stft'\}"): + _spectral_helper(x, x, mode='foo') + with chk_VE("x and y must be equal if mode is 'stft'"): + _spectral_helper(x[:512], x[512:], mode='stft') + with chk_VE("Unknown boundary option 'foo', must be one of: " + + r"\['even', 'odd', 'constant', 'zeros', None\]"): + _spectral_helper(x, x, boundary='foo') + + scaling = "not_valid" + with chk_VE(fr"Parameter {scaling=} not in \['spectrum', 'psd'\]!"): + stft(x, scaling=scaling) + with chk_VE(fr"Parameter {scaling=} not in \['spectrum', 'psd'\]!"): + istft(z, scaling=scaling) + + def test_check_COLA(self): + settings = [ + ('boxcar', 10, 0), + ('boxcar', 10, 9), + ('bartlett', 51, 26), + ('hann', 256, 128), + ('hann', 256, 192), + ('blackman', 300, 200), + (('tukey', 0.5), 256, 64), + ('hann', 256, 255), + ] + + for setting in settings: + msg = '{}, {}, {}'.format(*setting) + assert_equal(True, check_COLA(*setting), err_msg=msg) + + def test_check_NOLA(self): + settings_pass = [ + ('boxcar', 10, 0), + ('boxcar', 10, 9), + ('boxcar', 10, 7), + ('bartlett', 51, 26), + ('bartlett', 51, 10), + ('hann', 256, 128), + ('hann', 256, 192), + ('hann', 256, 37), + ('blackman', 300, 200), + ('blackman', 300, 123), + (('tukey', 0.5), 256, 64), + (('tukey', 0.5), 256, 38), + ('hann', 256, 255), + ('hann', 256, 39), + ] + for setting in settings_pass: + msg = '{}, {}, {}'.format(*setting) + assert_equal(True, check_NOLA(*setting), err_msg=msg) + + w_fail = np.ones(16) + w_fail[::2] = 0 + settings_fail = [ + (w_fail, len(w_fail), len(w_fail) // 2), + ('hann', 64, 0), + ] + for setting in settings_fail: + msg = '{}, {}, {}'.format(*setting) + assert_equal(False, check_NOLA(*setting), err_msg=msg) + + def test_average_all_segments(self): + rng = np.random.RandomState(1234) + x = rng.randn(1024) + + fs = 1.0 + window = 'hann' + nperseg = 16 + noverlap = 8 + + # Compare twosided, because onesided welch doubles non-DC terms to + # account for power at negative frequencies. stft doesn't do this, + # because it breaks invertibility. 
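+        # Editorial sketch (not part of the original test): the doubling of
+        # non-DC, non-Nyquist bins in the one-sided estimate can be seen by
+        # comparing the two Welch modes directly with the settings above.
+        _, P_one = welch(x, fs, window, nperseg, noverlap, return_onesided=True,
+                         scaling='spectrum', detrend=False)
+        _, P_two = welch(x, fs, window, nperseg, noverlap, return_onesided=False,
+                         scaling='spectrum', detrend=False)
+        assert_allclose(P_one[1:-1], 2 * P_two[1:nperseg // 2])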
+ f, _, Z = stft(x, fs, window, nperseg, noverlap, padded=False, + return_onesided=False, boundary=None) + fw, Pw = welch(x, fs, window, nperseg, noverlap, return_onesided=False, + scaling='spectrum', detrend=False) + + assert_allclose(f, fw) + assert_allclose(np.mean(np.abs(Z)**2, axis=-1), Pw) + + def test_permute_axes(self): + rng = np.random.RandomState(1234) + x = rng.randn(1024) + + fs = 1.0 + window = 'hann' + nperseg = 16 + noverlap = 8 + + f1, t1, Z1 = stft(x, fs, window, nperseg, noverlap) + f2, t2, Z2 = stft(x.reshape((-1, 1, 1)), fs, window, nperseg, noverlap, + axis=0) + + t3, x1 = istft(Z1, fs, window, nperseg, noverlap) + t4, x2 = istft(Z2.T, fs, window, nperseg, noverlap, time_axis=0, + freq_axis=-1) + + assert_allclose(f1, f2) + assert_allclose(t1, t2) + assert_allclose(t3, t4) + assert_allclose(Z1, Z2[:, 0, 0, :]) + assert_allclose(x1, x2[:, 0, 0]) + + @pytest.mark.parametrize('scaling', ['spectrum', 'psd']) + def test_roundtrip_real(self, scaling): + rng = np.random.RandomState(1234) + + settings = [ + ('boxcar', 100, 10, 0), # Test no overlap + ('boxcar', 100, 10, 9), # Test high overlap + ('bartlett', 101, 51, 26), # Test odd nperseg + ('hann', 1024, 256, 128), # Test defaults + (('tukey', 0.5), 1152, 256, 64), # Test Tukey + ('hann', 1024, 256, 255), # Test overlapped hann + ] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*rng.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=False, + scaling=scaling) + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, scaling=scaling) + + msg = f'{window}, {noverlap}' + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, xr, err_msg=msg) + + @pytest.mark.thread_unsafe + def test_roundtrip_not_nola(self): + rng = np.random.RandomState(1234) + + w_fail = np.ones(16) + w_fail[::2] = 0 + settings = [ + (w_fail, 256, len(w_fail), len(w_fail) // 2), + ('hann', 256, 64, 0), + ] + + for window, N, nperseg, noverlap in settings: + msg = f'{window}, {N}, {nperseg}, {noverlap}' + assert not check_NOLA(window, nperseg, noverlap), msg + + t = np.arange(N) + x = 10 * rng.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary='zeros') + with pytest.warns(UserWarning, match='NOLA'): + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, boundary=True) + + assert np.allclose(t, tr[:len(t)]), msg + assert not np.allclose(x, xr[:len(x)]), msg + + def test_roundtrip_nola_not_cola(self): + rng = np.random.RandomState(1234) + + settings = [ + ('boxcar', 100, 10, 3), # NOLA True, COLA False + ('bartlett', 101, 51, 37), # NOLA True, COLA False + ('hann', 1024, 256, 127), # NOLA True, COLA False + (('tukey', 0.5), 1152, 256, 14), # NOLA True, COLA False + ('hann', 1024, 256, 5), # NOLA True, COLA False + ] + + for window, N, nperseg, noverlap in settings: + msg = f'{window}, {nperseg}, {noverlap}' + assert check_NOLA(window, nperseg, noverlap), msg + assert not check_COLA(window, nperseg, noverlap), msg + + t = np.arange(N) + x = 10 * rng.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary='zeros') + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, boundary=True) + + msg = f'{window}, {noverlap}' + assert_allclose(t, tr[:len(t)], err_msg=msg) + assert_allclose(x, xr[:len(x)], err_msg=msg) + + def test_roundtrip_float32(self): + rng = 
np.random.RandomState(1234) + + settings = [('hann', 1024, 256, 128)] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*rng.randn(t.size) + x = x.astype(np.float32) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=False) + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window) + + msg = f'{window}, {noverlap}' + assert_allclose(t, t, err_msg=msg) + assert_allclose(x, xr, err_msg=msg, rtol=1e-4, atol=1e-5) + assert_(x.dtype == xr.dtype) + + @pytest.mark.thread_unsafe + @pytest.mark.parametrize('scaling', ['spectrum', 'psd']) + def test_roundtrip_complex(self, scaling): + rng = np.random.RandomState(1234) + + settings = [ + ('boxcar', 100, 10, 0), # Test no overlap + ('boxcar', 100, 10, 9), # Test high overlap + ('bartlett', 101, 51, 26), # Test odd nperseg + ('hann', 1024, 256, 128), # Test defaults + (('tukey', 0.5), 1152, 256, 64), # Test Tukey + ('hann', 1024, 256, 255), # Test overlapped hann + ] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*rng.randn(t.size) + 10j*rng.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=False, + return_onesided=False, scaling=scaling) + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, input_onesided=False, + scaling=scaling) + + msg = f'{window}, {nperseg}, {noverlap}' + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, xr, err_msg=msg) + + # Check that asking for onesided switches to twosided + with suppress_warnings() as sup: + sup.filter(UserWarning, + "Input data is complex, switching to return_onesided=False") + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=False, + return_onesided=True, scaling=scaling) + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, input_onesided=False, scaling=scaling) + + msg = f'{window}, {nperseg}, {noverlap}' + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, xr, err_msg=msg) + + def test_roundtrip_boundary_extension(self): + rng = np.random.RandomState(1234) + + # Test against boxcar, since window is all ones, and thus can be fully + # recovered with no boundary extension + + settings = [ + ('boxcar', 100, 10, 0), # Test no overlap + ('boxcar', 100, 10, 9), # Test high overlap + ] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*rng.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary=None) + + _, xr = istft(zz, noverlap=noverlap, window=window, boundary=False) + + for boundary in ['even', 'odd', 'constant', 'zeros']: + _, _, zz_ext = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary=boundary) + + _, xr_ext = istft(zz_ext, noverlap=noverlap, window=window, + boundary=True) + + msg = f'{window}, {noverlap}, {boundary}' + assert_allclose(x, xr, err_msg=msg) + assert_allclose(x, xr_ext, err_msg=msg) + + def test_roundtrip_padded_signal(self): + rng = np.random.RandomState(1234) + + settings = [ + ('boxcar', 101, 10, 0), + ('hann', 1000, 256, 128), + ] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*rng.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True) + + tr, xr = istft(zz, noverlap=noverlap, window=window) + + msg = f'{window}, {noverlap}' + # Account for possible zero-padding at the 
end + assert_allclose(t, tr[:t.size], err_msg=msg) + assert_allclose(x, xr[:x.size], err_msg=msg) + + def test_roundtrip_padded_FFT(self): + rng = np.random.RandomState(1234) + + settings = [ + ('hann', 1024, 256, 128, 512), + ('hann', 1024, 256, 128, 501), + ('boxcar', 100, 10, 0, 33), + (('tukey', 0.5), 1152, 256, 64, 1024), + ] + + for window, N, nperseg, noverlap, nfft in settings: + t = np.arange(N) + x = 10*rng.randn(t.size) + xc = x*np.exp(1j*np.pi/4) + + # real signal + _, _, z = stft(x, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window, detrend=None, padded=True) + + # complex signal + _, _, zc = stft(xc, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window, detrend=None, padded=True, + return_onesided=False) + + tr, xr = istft(z, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window) + + tr, xcr = istft(zc, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window, input_onesided=False) + + msg = f'{window}, {noverlap}' + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, xr, err_msg=msg) + assert_allclose(xc, xcr, err_msg=msg) + + def test_axis_rolling(self): + rng = np.random.RandomState(1234) + + x_flat = rng.randn(1024) + _, _, z_flat = stft(x_flat) + + for a in range(3): + newshape = [1,]*3 + newshape[a] = -1 + x = x_flat.reshape(newshape) + + _, _, z_plus = stft(x, axis=a) # Positive axis index + _, _, z_minus = stft(x, axis=a-x.ndim) # Negative axis index + + assert_equal(z_flat, z_plus.squeeze(), err_msg=a) + assert_equal(z_flat, z_minus.squeeze(), err_msg=a-x.ndim) + + # z_flat has shape [n_freq, n_time] + + # Test vs. transpose + _, x_transpose_m = istft(z_flat.T, time_axis=-2, freq_axis=-1) + _, x_transpose_p = istft(z_flat.T, time_axis=0, freq_axis=1) + + assert_allclose(x_flat, x_transpose_m, err_msg='istft transpose minus') + assert_allclose(x_flat, x_transpose_p, err_msg='istft transpose plus') + + def test_roundtrip_scaling(self): + """Verify behavior of scaling parameter. """ + # Create 1024 sample cosine signal with amplitude 2: + X = np.zeros(513, dtype=complex) + X[256] = 1024 + x = np.fft.irfft(X) + power_x = sum(x**2) / len(x) # power of signal x is 2 + + # Calculate magnitude-scaled STFT: + Zs = stft(x, boundary='even', scaling='spectrum')[2] + + # Test round trip: + x1 = istft(Zs, boundary=True, scaling='spectrum')[1] + assert_allclose(x1, x) + + # For a Hann-windowed 256 sample length FFT, we expect a peak at + # frequency 64 (since it is 1/4 the length of X) with a height of 1 + # (half the amplitude). A Hann window of a perfectly centered sine has + # the magnitude [..., 0, 0, 0.5, 1, 0.5, 0, 0, ...]. + # Note that in this case the 'even' padding works for the beginning + # but not for the end of the STFT. + assert_allclose(abs(Zs[63, :-1]), 0.5) + assert_allclose(abs(Zs[64, :-1]), 1) + assert_allclose(abs(Zs[65, :-1]), 0.5) + # All other values should be zero: + Zs[63:66, :-1] = 0 + # Note since 'rtol' does not have influence here, atol needs to be set: + assert_allclose(Zs[:, :-1], 0, atol=np.finfo(Zs.dtype).resolution) + + # Calculate two-sided psd-scaled STFT: + # - using 'even' padding since signal is axis symmetric - this ensures + # stationary behavior on the boundaries + # - using the two-sided transform allows determining the spectral + # power by `sum(abs(Zp[:, k])**2) / len(f)` for the k-th time slot. 
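+        # Editorial sketch (not part of the original test, assumes the default
+        # length-256 periodic Hann window and fs=1): scaling='psd' divides each
+        # segment's DFT by sqrt(sum(win**2)), so by Parseval's theorem
+        # sum(abs(Zp[:, k])**2) / len(f) is the win**2-weighted mean power of
+        # segment k.  A direct check of the scale factor assumed here:
+        win = hann(256, sym=False)
+        assert_allclose(np.sum(win**2), 96.0)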
+ Zp = stft(x, return_onesided=False, boundary='even', scaling='psd')[2] + + # Calculate spectral power of Zd by summing over the frequency axis: + psd_Zp = np.sum(Zp.real**2 + Zp.imag**2, axis=0) / Zp.shape[0] + # Spectral power of Zp should be equal to the signal's power: + assert_allclose(psd_Zp, power_x) + + # Test round trip: + x1 = istft(Zp, input_onesided=False, boundary=True, scaling='psd')[1] + assert_allclose(x1, x) + + # The power of the one-sided psd-scaled STFT can be determined + # analogously (note that the two sides are not of equal shape): + Zp0 = stft(x, return_onesided=True, boundary='even', scaling='psd')[2] + + # Since x is real, its Fourier transform is conjugate symmetric, i.e., + # the missing 'second side' can be expressed through the 'first side': + Zp1 = np.conj(Zp0[-2:0:-1, :]) # 'second side' is conjugate reversed + assert_allclose(Zp[:129, :], Zp0) + assert_allclose(Zp[129:, :], Zp1) + + # Calculate the spectral power: + s2 = (np.sum(Zp0.real ** 2 + Zp0.imag ** 2, axis=0) + + np.sum(Zp1.real ** 2 + Zp1.imag ** 2, axis=0)) + psd_Zp01 = s2 / (Zp0.shape[0] + Zp1.shape[0]) + assert_allclose(psd_Zp01, power_x) + + # Test round trip: + x1 = istft(Zp0, input_onesided=True, boundary=True, scaling='psd')[1] + assert_allclose(x1, x) + + +class TestSampledSpectralRepresentations: + """Check energy/power relations from `Spectral Analysis` section in the user guide. + + A 32 sample cosine signal is used to compare the numerical to the expected results + stated in :ref:`tutorial_SpectralAnalysis` in + file ``doc/source/tutorial/signal.rst`` + """ + n: int = 32 #: number of samples + T: float = 1/16 #: sampling interval + a_ref: float = 3 #: amplitude of reference + l_a: int = 3 #: index in fft for defining frequency of test signal + + x_ref: np.ndarray #: reference signal + X_ref: np.ndarray #: two-sided FFT of x_ref + E_ref: float #: energy of signal + P_ref: float #: power of signal + + def setup_method(self): + """Create Cosine signal with amplitude a from spectrum. """ + f = rfftfreq(self.n, self.T) + X_ref = np.zeros_like(f) + self.l_a = 3 + X_ref[self.l_a] = self.a_ref/2 * self.n # set amplitude + self.x_ref = irfft(X_ref) + self.X_ref = fft(self.x_ref) + + # Closed form expression for continuous-time signal: + self.E_ref = self.tau * self.a_ref**2 / 2 # energy of signal + self.P_ref = self.a_ref**2 / 2 # power of signal + + @property + def tau(self) -> float: + """Duration of signal. """ + return self.n * self.T + + @property + def delta_f(self) -> float: + """Bin width """ + return 1 / (self.n * self.T) + + def test_reference_signal(self): + """Test energy and power formulas. """ + # Verify that amplitude is a: + assert_allclose(2*self.a_ref, np.ptp(self.x_ref), rtol=0.1) + # Verify that energy expression for sampled signal: + assert_allclose(self.T * sum(self.x_ref ** 2), self.E_ref) + + # Verify that spectral energy and power formulas are correct: + sum_X_ref_squared = sum(self.X_ref.real**2 + self.X_ref.imag**2) + assert_allclose(self.T/self.n * sum_X_ref_squared, self.E_ref) + assert_allclose(1/self.n**2 * sum_X_ref_squared, self.P_ref) + + def test_windowed_DFT(self): + """Verify spectral representations of windowed DFT. + + Furthermore, the scalings of `periodogram` and `welch` are verified. 
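+        The window's coherent gain ``abs(sum(w))`` scales the amplitude-type
+        spectra, while its incoherent (rms) gain ``sqrt(sum(abs(w)**2))``
+        scales the energy and power spectral densities checked below.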
+ """ + w = hann(self.n, sym=False) + c_amp, c_rms = abs(sum(w)), np.sqrt(sum(w.real**2 + w.imag**2)) + Xw = fft(self.x_ref*w) # unnormalized windowed DFT + + # Verify that the *spectrum* peak is consistent: + assert_allclose(self.tau * Xw[self.l_a] / c_amp, self.a_ref * self.tau / 2) + # Verify that the *amplitude spectrum* peak is consistent: + assert_allclose(Xw[self.l_a] / c_amp, self.a_ref/2) + + # Verify spectral power/energy equals signal's power/energy: + X_ESD = self.tau * self.T * abs(Xw / c_rms)**2 # Energy Spectral Density + X_PSD = self.T * abs(Xw / c_rms)**2 # Power Spectral Density + assert_allclose(self.delta_f * sum(X_ESD), self.E_ref) + assert_allclose(self.delta_f * sum(X_PSD), self.P_ref) + + # Verify scalings of periodogram: + kw = dict(fs=1/self.T, window=w, detrend=False, return_onesided=False) + _, P_mag = periodogram(self.x_ref, scaling='spectrum', **kw) + _, P_psd = periodogram(self.x_ref, scaling='density', **kw) + + # Verify that periodogram calculates a squared magnitude spectrum: + float_res = np.finfo(P_mag.dtype).resolution + assert_allclose(P_mag, abs(Xw/c_amp)**2, atol=float_res*max(P_mag)) + # Verify that periodogram calculates a PSD: + assert_allclose(P_psd, X_PSD, atol=float_res*max(P_psd)) + + # Ensure that scaling of welch is the same as of periodogram: + kw = dict(nperseg=len(self.x_ref), noverlap=0, **kw) + assert_allclose(welch(self.x_ref, scaling='spectrum', **kw)[1], P_mag, + atol=float_res*max(P_mag)) + assert_allclose(welch(self.x_ref, scaling='density', **kw)[1], P_psd, + atol=float_res*max(P_psd)) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_waveforms.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_waveforms.py new file mode 100644 index 0000000000000000000000000000000000000000..b30f7b9ceba80822f05ecfcabaad76fb87d5f335 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_waveforms.py @@ -0,0 +1,380 @@ +import numpy as np +from pytest import raises as assert_raises +from scipy._lib._array_api import ( + assert_almost_equal, xp_assert_equal, xp_assert_close +) + +import scipy.signal._waveforms as waveforms + + +# These chirp_* functions are the instantaneous frequencies of the signals +# returned by chirp(). + +def chirp_linear(t, f0, f1, t1): + f = f0 + (f1 - f0) * t / t1 + return f + + +def chirp_quadratic(t, f0, f1, t1, vertex_zero=True): + if vertex_zero: + f = f0 + (f1 - f0) * t**2 / t1**2 + else: + f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2 + return f + + +def chirp_geometric(t, f0, f1, t1): + f = f0 * (f1/f0)**(t/t1) + return f + + +def chirp_hyperbolic(t, f0, f1, t1): + f = f0*f1*t1 / ((f0 - f1)*t + f1*t1) + return f + + +def compute_frequency(t, theta): + """ + Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t). + """ + # Assume theta and t are 1-D NumPy arrays. + # Assume that t is uniformly spaced. 
+ dt = t[1] - t[0] + f = np.diff(theta)/(2*np.pi) / dt + tf = 0.5*(t[1:] + t[:-1]) + return tf, f + + +class TestChirp: + + def test_linear_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear') + assert_almost_equal(w, 1.0) + + def test_linear_freq_01(self): + method = 'linear' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 100) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_linear_freq_02(self): + method = 'linear' + f0 = 200.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 100) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_linear_complex_power(self): + method = 'linear' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 100) + w_real = waveforms.chirp(t, f0, t1, f1, method, complex=False) + w_complex = waveforms.chirp(t, f0, t1, f1, method, complex=True) + w_pwr_r = np.var(w_real) + w_pwr_c = np.var(w_complex) + + # Making sure that power of the real part is not affected with + # complex conversion operation + err = w_pwr_r - np.real(w_pwr_c) + + assert(err < 1e-6) + + def test_linear_complex_at_zero(self): + w = waveforms.chirp(t=0, f0=-10.0, f1=1.0, t1=1.0, method='linear', + complex=True) + xp_assert_close(w, 1.0+0.0j) # dtype must match + + def test_quadratic_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic') + assert_almost_equal(w, 1.0) + + def test_quadratic_at_zero2(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic', + vertex_zero=False) + assert_almost_equal(w, 1.0) + + def test_quadratic_complex_at_zero(self): + w = waveforms.chirp(t=0, f0=-1.0, f1=2.0, t1=1.0, method='quadratic', + complex=True) + xp_assert_close(w, 1.0+0j) + + def test_quadratic_freq_01(self): + method = 'quadratic' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 2000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_quadratic_freq_02(self): + method = 'quadratic' + f0 = 20.0 + f1 = 10.0 + t1 = 10.0 + t = np.linspace(0, t1, 2000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_logarithmic_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic') + assert_almost_equal(w, 1.0) + + def test_logarithmic_freq_01(self): + method = 'logarithmic' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_logarithmic_freq_02(self): + method = 'logarithmic' + f0 = 200.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_logarithmic_freq_03(self): + method = 'logarithmic' + f0 = 100.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = 
compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert abserr < 1e-6 + + def test_hyperbolic_at_zero(self): + w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic') + assert_almost_equal(w, 1.0) + + def test_hyperbolic_freq_01(self): + method = 'hyperbolic' + t1 = 1.0 + t = np.linspace(0, t1, 10000) + # f0 f1 + cases = [[10.0, 1.0], + [1.0, 10.0], + [-10.0, -1.0], + [-1.0, -10.0]] + for f0, f1 in cases: + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + expected = chirp_hyperbolic(tf, f0, f1, t1) + xp_assert_close(f, expected, atol=1e-7) + + def test_hyperbolic_zero_freq(self): + # f0=0 or f1=0 must raise a ValueError. + method = 'hyperbolic' + t1 = 1.0 + t = np.linspace(0, t1, 5) + assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method) + assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method) + + def test_unknown_method(self): + method = "foo" + f0 = 10.0 + f1 = 20.0 + t1 = 1.0 + t = np.linspace(0, t1, 10) + assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method) + + def test_integer_t1(self): + f0 = 10.0 + f1 = 20.0 + t = np.linspace(-1, 1, 11) + t1 = 3.0 + float_result = waveforms.chirp(t, f0, t1, f1) + t1 = 3 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 't1=3' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_f0(self): + f1 = 20.0 + t1 = 3.0 + t = np.linspace(-1, 1, 11) + f0 = 10.0 + float_result = waveforms.chirp(t, f0, t1, f1) + f0 = 10 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f0=10' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_f1(self): + f0 = 10.0 + t1 = 3.0 + t = np.linspace(-1, 1, 11) + f1 = 20.0 + float_result = waveforms.chirp(t, f0, t1, f1) + f1 = 20 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f1=20' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_all(self): + f0 = 10 + t1 = 3 + f1 = 20 + t = np.linspace(-1, 1, 11) + float_result = waveforms.chirp(t, float(f0), float(t1), float(f1)) + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + +class TestSweepPoly: + + def test_sweep_poly_quad1(self): + p = np.poly1d([1.0, 0.0, 1.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_const(self): + p = np.poly1d(2.0) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_linear(self): + p = np.poly1d([-1.0, 10.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_quad2(self): + p = np.poly1d([1.0, 0.0, -2.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_cubic(self): + p = np.poly1d([2.0, 1.0, 0.0, -2.0]) + t = 
np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_cubic2(self): + """Use an array of coefficients instead of a poly1d.""" + p = np.array([2.0, 1.0, 0.0, -2.0]) + t = np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = np.poly1d(p)(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + def test_sweep_poly_cubic3(self): + """Use a list of coefficients instead of a poly1d.""" + p = [2.0, 1.0, 0.0, -2.0] + t = np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = np.poly1d(p)(tf) + abserr = np.max(np.abs(f - expected)) + assert abserr < 1e-6 + + +class TestGaussPulse: + + def test_integer_fc(self): + float_result = waveforms.gausspulse('cutoff', fc=1000.0) + int_result = waveforms.gausspulse('cutoff', fc=1000) + err_msg = "Integer input 'fc=1000' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_bw(self): + float_result = waveforms.gausspulse('cutoff', bw=1.0) + int_result = waveforms.gausspulse('cutoff', bw=1) + err_msg = "Integer input 'bw=1' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_bwr(self): + float_result = waveforms.gausspulse('cutoff', bwr=-6.0) + int_result = waveforms.gausspulse('cutoff', bwr=-6) + err_msg = "Integer input 'bwr=-6' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_tpr(self): + float_result = waveforms.gausspulse('cutoff', tpr=-60.0) + int_result = waveforms.gausspulse('cutoff', tpr=-60) + err_msg = "Integer input 'tpr=-60' gives wrong result" + xp_assert_equal(int_result, float_result, err_msg=err_msg) + + +class TestUnitImpulse: + + def test_no_index(self): + xp_assert_equal(waveforms.unit_impulse(7), + np.asarray([1.0, 0, 0, 0, 0, 0, 0])) + xp_assert_equal(waveforms.unit_impulse((3, 3)), + np.asarray([[1.0, 0, 0], [0, 0, 0], [0, 0, 0]])) + + def test_index(self): + xp_assert_equal(waveforms.unit_impulse(10, 3), + np.asarray([0.0, 0, 0, 1, 0, 0, 0, 0, 0, 0])) + xp_assert_equal(waveforms.unit_impulse((3, 3), (1, 1)), + np.asarray([[0.0, 0, 0], [0, 1, 0], [0, 0, 0]])) + + # Broadcasting + imp = waveforms.unit_impulse((4, 4), 2) + xp_assert_equal(imp, np.asarray([[0.0, 0, 0, 0], + [0.0, 0, 0, 0], + [0.0, 0, 1, 0], + [0.0, 0, 0, 0]])) + + def test_mid(self): + xp_assert_equal(waveforms.unit_impulse((3, 3), 'mid'), + np.asarray([[0.0, 0, 0], [0, 1, 0], [0, 0, 0]])) + xp_assert_equal(waveforms.unit_impulse(9, 'mid'), + np.asarray([0.0, 0, 0, 0, 1, 0, 0, 0, 0])) + + def test_dtype(self): + imp = waveforms.unit_impulse(7) + assert np.issubdtype(imp.dtype, np.floating) + + imp = waveforms.unit_impulse(5, 3, dtype=int) + assert np.issubdtype(imp.dtype, np.integer) + + imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex) + assert np.issubdtype(imp.dtype, np.complexfloating) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_wavelets.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_wavelets.py new file mode 100644 index 0000000000000000000000000000000000000000..7a357d2eaf4a530930d612358b8ca69a18b5248e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_wavelets.py @@ -0,0 +1,59 @@ +import numpy as np +from 
numpy.testing import assert_array_equal, assert_array_almost_equal + +import scipy.signal._wavelets as wavelets + + +class TestWavelets: + def test_ricker(self): + w = wavelets._ricker(1.0, 1) + expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25)) + assert_array_equal(w, expected) + + lengths = [5, 11, 15, 51, 101] + for length in lengths: + w = wavelets._ricker(length, 1.0) + assert len(w) == length + max_loc = np.argmax(w) + assert max_loc == (length // 2) + + points = 100 + w = wavelets._ricker(points, 2.0) + half_vec = np.arange(0, points // 2) + # Wavelet should be symmetric + assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)]) + + # Check zeros + aas = [5, 10, 15, 20, 30] + points = 99 + for a in aas: + w = wavelets._ricker(points, a) + vec = np.arange(0, points) - (points - 1.0) / 2 + exp_zero1 = np.argmin(np.abs(vec - a)) + exp_zero2 = np.argmin(np.abs(vec + a)) + assert_array_almost_equal(w[exp_zero1], 0) + assert_array_almost_equal(w[exp_zero2], 0) + + def test_cwt(self): + widths = [1.0] + def delta_wavelet(s, t): + return np.array([1]) + len_data = 100 + test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0) + + # Test delta function input gives same data as output + cwt_dat = wavelets._cwt(test_data, delta_wavelet, widths) + assert cwt_dat.shape == (len(widths), len_data) + assert_array_almost_equal(test_data, cwt_dat.flatten()) + + # Check proper shape on output + widths = [1, 3, 4, 5, 10] + cwt_dat = wavelets._cwt(test_data, wavelets._ricker, widths) + assert cwt_dat.shape == (len(widths), len_data) + + widths = [len_data * 10] + # Note: this wavelet isn't defined quite right, but is fine for this test + def flat_wavelet(l, w): + return np.full(w, 1 / w) + cwt_dat = wavelets._cwt(test_data, flat_wavelet, widths) + assert_array_almost_equal(cwt_dat, np.mean(test_data)) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_windows.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_windows.py new file mode 100644 index 0000000000000000000000000000000000000000..75c4da5327f0c1a806f72865687927bc7a380c6d --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/tests/test_windows.py @@ -0,0 +1,846 @@ +import numpy as np +from numpy import array +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_allclose, + assert_equal, assert_, assert_array_less, + suppress_warnings) +from pytest import raises as assert_raises + +from scipy.fft import fft +from scipy.signal import windows, get_window, resample + + +window_funcs = [ + ('boxcar', ()), + ('triang', ()), + ('parzen', ()), + ('bohman', ()), + ('blackman', ()), + ('nuttall', ()), + ('blackmanharris', ()), + ('flattop', ()), + ('bartlett', ()), + ('barthann', ()), + ('hamming', ()), + ('kaiser', (1,)), + ('dpss', (2,)), + ('gaussian', (0.5,)), + ('general_gaussian', (1.5, 2)), + ('chebwin', (1,)), + ('cosine', ()), + ('hann', ()), + ('exponential', ()), + ('taylor', ()), + ('tukey', (0.5,)), + ('lanczos', ()), + ] + + +class TestBartHann: + + def test_basic(self): + assert_allclose(windows.barthann(6, sym=True), + [0, 0.35857354213752, 0.8794264578624801, + 0.8794264578624801, 0.3585735421375199, 0], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.barthann(7), + [0, 0.27, 0.73, 1.0, 0.73, 0.27, 0], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.barthann(6, False), + [0, 0.27, 0.73, 1.0, 0.73, 0.27], + rtol=1e-15, atol=1e-15) + + +class TestBartlett: + + def test_basic(self): + assert_allclose(windows.bartlett(6), [0, 0.4, 0.8, 
0.8, 0.4, 0]) + assert_allclose(windows.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0]) + assert_allclose(windows.bartlett(6, False), + [0, 1/3, 2/3, 1.0, 2/3, 1/3]) + + +class TestBlackman: + + def test_basic(self): + assert_allclose(windows.blackman(6, sym=False), + [0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14) + assert_allclose(windows.blackman(7, sym=False), + [0, 0.09045342435412804, 0.4591829575459636, + 0.9203636180999081, 0.9203636180999081, + 0.4591829575459636, 0.09045342435412804], atol=1e-8) + assert_allclose(windows.blackman(6), + [0, 0.2007701432625305, 0.8492298567374694, + 0.8492298567374694, 0.2007701432625305, 0], + atol=1e-14) + assert_allclose(windows.blackman(7, True), + [0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14) + + +class TestBlackmanHarris: + + def test_basic(self): + assert_allclose(windows.blackmanharris(6, False), + [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645]) + assert_allclose(windows.blackmanharris(7, sym=False), + [6.0e-05, 0.03339172347815117, 0.332833504298565, + 0.8893697722232837, 0.8893697722232838, + 0.3328335042985652, 0.03339172347815122]) + assert_allclose(windows.blackmanharris(6), + [6.0e-05, 0.1030114893456638, 0.7938335106543362, + 0.7938335106543364, 0.1030114893456638, 6.0e-05]) + assert_allclose(windows.blackmanharris(7, sym=True), + [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645, + 6.0e-05]) + + +class TestTaylor: + + def test_normalized(self): + """Tests windows of small length that are normalized to 1. See the + documentation for the Taylor window for more information on + normalization. + """ + assert_allclose(windows.taylor(1, 2, 15), 1.0) + assert_allclose( + windows.taylor(5, 2, 15), + np.array([0.75803341, 0.90757699, 1.0, 0.90757699, 0.75803341]) + ) + assert_allclose( + windows.taylor(6, 2, 15), + np.array([ + 0.7504082, 0.86624416, 0.98208011, 0.98208011, 0.86624416, + 0.7504082 + ]) + ) + + def test_non_normalized(self): + """Test windows of small length that are not normalized to 1. See + the documentation for the Taylor window for more information on + normalization. + """ + assert_allclose( + windows.taylor(5, 2, 15, norm=False), + np.array([ + 0.87508054, 1.04771499, 1.15440894, 1.04771499, 0.87508054 + ]) + ) + assert_allclose( + windows.taylor(6, 2, 15, norm=False), + np.array([ + 0.86627793, 1.0, 1.13372207, 1.13372207, 1.0, 0.86627793 + ]) + ) + + def test_correctness(self): + """This test ensures the correctness of the implemented Taylor + Windowing function. A Taylor Window of 1024 points is created, its FFT + is taken, and the Peak Sidelobe Level (PSLL) and 3dB and 18dB bandwidth + are found and checked. + + A publication from Sandia National Laboratories was used as reference + for the correctness values [1]_. + + References + ----- + .. [1] Armin Doerry, "Catalog of Window Taper Functions for + Sidelobe Control", 2017. + https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf + """ + M_win = 1024 + N_fft = 131072 + # Set norm=False for correctness as the values obtained from the + # scientific publication do not normalize the values. Normalizing + # changes the sidelobe level from the desired value. 
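+        # The checks below locate the end of the main lobe as the first index
+        # where the log-magnitude spectrum turns upward, take the peak of the
+        # remaining spectrum as the PSLL, and read the -3 dB and -18 dB
+        # bandwidths off the first threshold crossings, converted from the
+        # zero-padded FFT grid back to bins of the length-M_win window.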
+ w = windows.taylor(M_win, nbar=4, sll=35, norm=False, sym=False) + f = fft(w, N_fft) + spec = 20 * np.log10(np.abs(f / np.amax(f))) + + first_zero = np.argmax(np.diff(spec) > 0) + + PSLL = np.amax(spec[first_zero:-first_zero]) + + BW_3dB = 2*np.argmax(spec <= -3.0102999566398121) / N_fft * M_win + BW_18dB = 2*np.argmax(spec <= -18.061799739838872) / N_fft * M_win + + assert_allclose(PSLL, -35.1672, atol=1) + assert_allclose(BW_3dB, 1.1822, atol=0.1) + assert_allclose(BW_18dB, 2.6112, atol=0.1) + + +class TestBohman: + + def test_basic(self): + assert_allclose(windows.bohman(6), + [0, 0.1791238937062839, 0.8343114522576858, + 0.8343114522576858, 0.1791238937062838, 0]) + assert_allclose(windows.bohman(7, sym=True), + [0, 0.1089977810442293, 0.6089977810442293, 1.0, + 0.6089977810442295, 0.1089977810442293, 0]) + assert_allclose(windows.bohman(6, False), + [0, 0.1089977810442293, 0.6089977810442293, 1.0, + 0.6089977810442295, 0.1089977810442293]) + + +class TestBoxcar: + + def test_basic(self): + assert_allclose(windows.boxcar(6), [1, 1, 1, 1, 1, 1]) + assert_allclose(windows.boxcar(7), [1, 1, 1, 1, 1, 1, 1]) + assert_allclose(windows.boxcar(6, False), [1, 1, 1, 1, 1, 1]) + + +cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348, + 0.198891, 0.235450, 0.274846, 0.316836, + 0.361119, 0.407338, 0.455079, 0.503883, + 0.553248, 0.602637, 0.651489, 0.699227, + 0.745266, 0.789028, 0.829947, 0.867485, + 0.901138, 0.930448, 0.955010, 0.974482, + 0.988591, 0.997138, 1.000000, 0.997138, + 0.988591, 0.974482, 0.955010, 0.930448, + 0.901138, 0.867485, 0.829947, 0.789028, + 0.745266, 0.699227, 0.651489, 0.602637, + 0.553248, 0.503883, 0.455079, 0.407338, + 0.361119, 0.316836, 0.274846, 0.235450, + 0.198891, 0.165348, 0.134941, 0.107729, + 0.200938]) + +cheb_even_true = array([0.203894, 0.107279, 0.133904, + 0.163608, 0.196338, 0.231986, + 0.270385, 0.311313, 0.354493, + 0.399594, 0.446233, 0.493983, + 0.542378, 0.590916, 0.639071, + 0.686302, 0.732055, 0.775783, + 0.816944, 0.855021, 0.889525, + 0.920006, 0.946060, 0.967339, + 0.983557, 0.994494, 1.000000, + 1.000000, 0.994494, 0.983557, + 0.967339, 0.946060, 0.920006, + 0.889525, 0.855021, 0.816944, + 0.775783, 0.732055, 0.686302, + 0.639071, 0.590916, 0.542378, + 0.493983, 0.446233, 0.399594, + 0.354493, 0.311313, 0.270385, + 0.231986, 0.196338, 0.163608, + 0.133904, 0.107279, 0.203894]) + + +class TestChebWin: + + def test_basic(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + assert_allclose(windows.chebwin(6, 100), + [0.1046401879356917, 0.5075781475823447, 1.0, 1.0, + 0.5075781475823447, 0.1046401879356917]) + assert_allclose(windows.chebwin(7, 100), + [0.05650405062850233, 0.316608530648474, + 0.7601208123539079, 1.0, 0.7601208123539079, + 0.316608530648474, 0.05650405062850233]) + assert_allclose(windows.chebwin(6, 10), + [1.0, 0.6071201674458373, 0.6808391469897297, + 0.6808391469897297, 0.6071201674458373, 1.0]) + assert_allclose(windows.chebwin(7, 10), + [1.0, 0.5190521247588651, 0.5864059018130382, + 0.6101519801307441, 0.5864059018130382, + 0.5190521247588651, 1.0]) + assert_allclose(windows.chebwin(6, 10, False), + [1.0, 0.5190521247588651, 0.5864059018130382, + 0.6101519801307441, 0.5864059018130382, + 0.5190521247588651]) + + def test_cheb_odd_high_attenuation(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_odd = windows.chebwin(53, at=-40) + assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4) + + def 
test_cheb_even_high_attenuation(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_even = windows.chebwin(54, at=40) + assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4) + + def test_cheb_odd_low_attenuation(self): + cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405, + 0.610151, 0.586405, 0.519052, + 1.000000]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_odd = windows.chebwin(7, at=10) + assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4) + + def test_cheb_even_low_attenuation(self): + cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027, + 0.541338, 0.541338, 0.51027, + 0.451924, 1.000000]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_even = windows.chebwin(8, at=-10) + assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4) + + +exponential_data = { + (4, None, 0.2, False): + array([4.53999297624848542e-05, + 6.73794699908546700e-03, 1.00000000000000000e+00, + 6.73794699908546700e-03]), + (4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988, + 0.0820849986238988, 0.00055308437014783]), + (4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., + 0.36787944117144233]), + (4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342, + 0.60653065971263342, 0.22313016014842982]), + (4, 2, 0.2, False): + array([4.53999297624848542e-05, 6.73794699908546700e-03, + 1.00000000000000000e+00, 6.73794699908546700e-03]), + (4, 2, 0.2, True): None, + (4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., + 0.36787944117144233]), + (4, 2, 1.0, True): None, + (5, None, 0.2, True): + array([4.53999297624848542e-05, + 6.73794699908546700e-03, 1.00000000000000000e+00, + 6.73794699908546700e-03, 4.53999297624848542e-05]), + (5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1., + 0.36787944117144233, 0.1353352832366127]), + (5, 2, 0.2, True): None, + (5, 2, 1.0, True): None +} + + +def test_exponential(): + for k, v in exponential_data.items(): + if v is None: + assert_raises(ValueError, windows.exponential, *k) + else: + win = windows.exponential(*k) + assert_allclose(win, v, rtol=1e-14) + + +class TestFlatTop: + + def test_basic(self): + assert_allclose(windows.flattop(6, sym=False), + [-0.000421051, -0.051263156, 0.19821053, 1.0, + 0.19821053, -0.051263156]) + assert_allclose(windows.flattop(7, sym=False), + [-0.000421051, -0.03684078115492348, + 0.01070371671615342, 0.7808739149387698, + 0.7808739149387698, 0.01070371671615342, + -0.03684078115492348]) + assert_allclose(windows.flattop(6), + [-0.000421051, -0.0677142520762119, 0.6068721525762117, + 0.6068721525762117, -0.0677142520762119, + -0.000421051]) + assert_allclose(windows.flattop(7, True), + [-0.000421051, -0.051263156, 0.19821053, 1.0, + 0.19821053, -0.051263156, -0.000421051]) + + +class TestGaussian: + + def test_basic(self): + assert_allclose(windows.gaussian(6, 1.0), + [0.04393693362340742, 0.3246524673583497, + 0.8824969025845955, 0.8824969025845955, + 0.3246524673583497, 0.04393693362340742]) + assert_allclose(windows.gaussian(7, 1.2), + [0.04393693362340742, 0.2493522087772962, + 0.7066482778577162, 1.0, 0.7066482778577162, + 0.2493522087772962, 0.04393693362340742]) + assert_allclose(windows.gaussian(7, 3), + [0.6065306597126334, 0.8007374029168081, + 0.9459594689067654, 1.0, 0.9459594689067654, + 0.8007374029168081, 0.6065306597126334]) 
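+        # The periodic (sym=False) window of length M equals the first M
+        # points of the symmetric window of length M + 1, as the values in the
+        # two preceding and following checks show.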
+ assert_allclose(windows.gaussian(6, 3, False), + [0.6065306597126334, 0.8007374029168081, + 0.9459594689067654, 1.0, 0.9459594689067654, + 0.8007374029168081]) + + +class TestGeneralCosine: + + def test_basic(self): + assert_allclose(windows.general_cosine(5, [0.5, 0.3, 0.2]), + [0.4, 0.3, 1, 0.3, 0.4]) + assert_allclose(windows.general_cosine(4, [0.5, 0.3, 0.2], sym=False), + [0.4, 0.3, 1, 0.3]) + + +class TestGeneralHamming: + + def test_basic(self): + assert_allclose(windows.general_hamming(5, 0.7), + [0.4, 0.7, 1.0, 0.7, 0.4]) + assert_allclose(windows.general_hamming(5, 0.75, sym=False), + [0.5, 0.6727457514, 0.9522542486, + 0.9522542486, 0.6727457514]) + assert_allclose(windows.general_hamming(6, 0.75, sym=True), + [0.5, 0.6727457514, 0.9522542486, + 0.9522542486, 0.6727457514, 0.5]) + + +class TestHamming: + + def test_basic(self): + assert_allclose(windows.hamming(6, False), + [0.08, 0.31, 0.77, 1.0, 0.77, 0.31]) + assert_allclose(windows.hamming(7, sym=False), + [0.08, 0.2531946911449826, 0.6423596296199047, + 0.9544456792351128, 0.9544456792351128, + 0.6423596296199047, 0.2531946911449826]) + assert_allclose(windows.hamming(6), + [0.08, 0.3978521825875242, 0.9121478174124757, + 0.9121478174124757, 0.3978521825875242, 0.08]) + assert_allclose(windows.hamming(7, sym=True), + [0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08]) + + +class TestHann: + + def test_basic(self): + assert_allclose(windows.hann(6, sym=False), + [0, 0.25, 0.75, 1.0, 0.75, 0.25], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.hann(7, sym=False), + [0, 0.1882550990706332, 0.6112604669781572, + 0.9504844339512095, 0.9504844339512095, + 0.6112604669781572, 0.1882550990706332], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.hann(6, True), + [0, 0.3454915028125263, 0.9045084971874737, + 0.9045084971874737, 0.3454915028125263, 0], + rtol=1e-15, atol=1e-15) + assert_allclose(windows.hann(7), + [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0], + rtol=1e-15, atol=1e-15) + + +class TestKaiser: + + def test_basic(self): + assert_allclose(windows.kaiser(6, 0.5), + [0.9403061933191572, 0.9782962393705389, + 0.9975765035372042, 0.9975765035372042, + 0.9782962393705389, 0.9403061933191572]) + assert_allclose(windows.kaiser(7, 0.5), + [0.9403061933191572, 0.9732402256999829, + 0.9932754654413773, 1.0, 0.9932754654413773, + 0.9732402256999829, 0.9403061933191572]) + assert_allclose(windows.kaiser(6, 2.7), + [0.2603047507678832, 0.6648106293528054, + 0.9582099802511439, 0.9582099802511439, + 0.6648106293528054, 0.2603047507678832]) + assert_allclose(windows.kaiser(7, 2.7), + [0.2603047507678832, 0.5985765418119844, + 0.8868495172060835, 1.0, 0.8868495172060835, + 0.5985765418119844, 0.2603047507678832]) + assert_allclose(windows.kaiser(6, 2.7, False), + [0.2603047507678832, 0.5985765418119844, + 0.8868495172060835, 1.0, 0.8868495172060835, + 0.5985765418119844]) + + +class TestKaiserBesselDerived: + + def test_basic(self): + M = 100 + w = windows.kaiser_bessel_derived(M, beta=4.0) + w2 = windows.get_window(('kaiser bessel derived', 4.0), + M, fftbins=False) + assert_allclose(w, w2) + + # Test for Princen-Bradley condition + assert_allclose(w[:M // 2] ** 2 + w[-M // 2:] ** 2, 1.) 
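+        # The Princen-Bradley condition w[n]**2 + w[n + M//2]**2 == 1 checked
+        # above is what makes this window suitable for MDCT/TDAC
+        # analysis-synthesis with perfect reconstruction.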
+ + # Test actual values from other implementations + # M = 2: sqrt(2) / 2 + # M = 4: 0.518562710536, 0.855039598640 + # M = 6: 0.436168993154, 0.707106781187, 0.899864772847 + # Ref:https://github.com/scipy/scipy/pull/4747#issuecomment-172849418 + assert_allclose(windows.kaiser_bessel_derived(2, beta=np.pi / 2)[:1], + np.sqrt(2) / 2) + + assert_allclose(windows.kaiser_bessel_derived(4, beta=np.pi / 2)[:2], + [0.518562710536, 0.855039598640]) + + assert_allclose(windows.kaiser_bessel_derived(6, beta=np.pi / 2)[:3], + [0.436168993154, 0.707106781187, 0.899864772847]) + + def test_exceptions(self): + M = 100 + # Assert ValueError for odd window length + msg = ("Kaiser-Bessel Derived windows are only defined for even " + "number of points") + with assert_raises(ValueError, match=msg): + windows.kaiser_bessel_derived(M + 1, beta=4.) + + # Assert ValueError for non-symmetric setting + msg = ("Kaiser-Bessel Derived windows are only defined for " + "symmetric shapes") + with assert_raises(ValueError, match=msg): + windows.kaiser_bessel_derived(M + 1, beta=4., sym=False) + + +class TestNuttall: + + def test_basic(self): + assert_allclose(windows.nuttall(6, sym=False), + [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298, + 0.0613345]) + assert_allclose(windows.nuttall(7, sym=False), + [0.0003628, 0.03777576895352025, 0.3427276199688195, + 0.8918518610776603, 0.8918518610776603, + 0.3427276199688196, 0.0377757689535203]) + assert_allclose(windows.nuttall(6), + [0.0003628, 0.1105152530498718, 0.7982580969501282, + 0.7982580969501283, 0.1105152530498719, 0.0003628]) + assert_allclose(windows.nuttall(7, True), + [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298, + 0.0613345, 0.0003628]) + + +class TestParzen: + + def test_basic(self): + assert_allclose(windows.parzen(6), + [0.009259259259259254, 0.25, 0.8611111111111112, + 0.8611111111111112, 0.25, 0.009259259259259254]) + assert_allclose(windows.parzen(7, sym=True), + [0.00583090379008747, 0.1574344023323616, + 0.6501457725947521, 1.0, 0.6501457725947521, + 0.1574344023323616, 0.00583090379008747]) + assert_allclose(windows.parzen(6, False), + [0.00583090379008747, 0.1574344023323616, + 0.6501457725947521, 1.0, 0.6501457725947521, + 0.1574344023323616]) + + +class TestTriang: + + def test_basic(self): + + assert_allclose(windows.triang(6, True), + [1/6, 1/2, 5/6, 5/6, 1/2, 1/6]) + assert_allclose(windows.triang(7), + [1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4]) + assert_allclose(windows.triang(6, sym=False), + [1/4, 1/2, 3/4, 1, 3/4, 1/2]) + + +tukey_data = { + (4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]), + (4, 0.9, True): array([0.0, 0.84312081893436686, + 0.84312081893436686, 0.0]), + (4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]), + (4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]), + (4, 0.9, False): array([0.0, 0.58682408883346526, + 1.0, 0.58682408883346526]), + (4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]), + (5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]), + (5, 0.8, True): array([0.0, 0.69134171618254492, + 1.0, 0.69134171618254492, 0.0]), + (5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]), + + (6, 0): [1, 1, 1, 1, 1, 1], + (7, 0): [1, 1, 1, 1, 1, 1, 1], + (6, .25): [0, 1, 1, 1, 1, 0], + (7, .25): [0, 1, 1, 1, 1, 1, 0], + (6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0], + (7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0], + (6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0], + (7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0, + 0.9698463103929542, 0.4131759111665347, 0], + (6, 1): [0, 0.3454915028125263, 0.9045084971874737, 
0.9045084971874737, + 0.3454915028125263, 0], + (7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0], +} + + +class TestTukey: + + def test_basic(self): + # Test against hardcoded data + for k, v in tukey_data.items(): + if v is None: + assert_raises(ValueError, windows.tukey, *k) + else: + win = windows.tukey(*k) + assert_allclose(win, v, rtol=1e-15, atol=1e-15) + + def test_extremes(self): + # Test extremes of alpha correspond to boxcar and hann + tuk0 = windows.tukey(100, 0) + box0 = windows.boxcar(100) + assert_array_almost_equal(tuk0, box0) + + tuk1 = windows.tukey(100, 1) + han1 = windows.hann(100) + assert_array_almost_equal(tuk1, han1) + + +dpss_data = { + # All values from MATLAB: + # * taper[1] of (3, 1.4, 3) sign-flipped + # * taper[3] of (5, 1.5, 5) sign-flipped + (4, 0.1, 2): ([[0.497943898, 0.502047681, 0.502047681, 0.497943898], [0.670487993, 0.224601537, -0.224601537, -0.670487993]], [0.197961815, 0.002035474]), # noqa: E501 + (3, 1.4, 3): ([[0.410233151, 0.814504464, 0.410233151], [0.707106781, 0.0, -0.707106781], [0.575941629, -0.580157287, 0.575941629]], [0.999998093, 0.998067480, 0.801934426]), # noqa: E501 + (5, 1.5, 5): ([[0.1745071052, 0.4956749177, 0.669109327, 0.495674917, 0.174507105], [0.4399493348, 0.553574369, 0.0, -0.553574369, -0.439949334], [0.631452756, 0.073280238, -0.437943884, 0.073280238, 0.631452756], [0.553574369, -0.439949334, 0.0, 0.439949334, -0.553574369], [0.266110290, -0.498935248, 0.600414741, -0.498935248, 0.266110290147157]], [0.999728571, 0.983706916, 0.768457889, 0.234159338, 0.013947282907567]), # noqa: E501 + (100, 2, 4): ([[0.0030914414, 0.0041266922, 0.005315076, 0.006665149, 0.008184854, 0.0098814158, 0.011761239, 0.013829809, 0.016091597, 0.018549973, 0.02120712, 0.02406396, 0.027120092, 0.030373728, 0.033821651, 0.037459181, 0.041280145, 0.045276872, 0.049440192, 0.053759447, 0.058222524, 0.062815894, 0.067524661, 0.072332638, 0.077222418, 0.082175473, 0.087172252, 0.092192299, 0.097214376, 0.1022166, 0.10717657, 0.11207154, 0.11687856, 0.12157463, 0.12613686, 0.13054266, 0.13476986, 0.13879691, 0.14260302, 0.14616832, 0.14947401, 0.1525025, 0.15523755, 0.15766438, 0.15976981, 0.16154233, 0.16297223, 0.16405162, 0.16477455, 0.16513702, 0.16513702, 0.16477455, 0.16405162, 0.16297223, 0.16154233, 0.15976981, 0.15766438, 0.15523755, 0.1525025, 0.14947401, 0.14616832, 0.14260302, 0.13879691, 0.13476986, 0.13054266, 0.12613686, 0.12157463, 0.11687856, 0.11207154, 0.10717657, 0.1022166, 0.097214376, 0.092192299, 0.087172252, 0.082175473, 0.077222418, 0.072332638, 0.067524661, 0.062815894, 0.058222524, 0.053759447, 0.049440192, 0.045276872, 0.041280145, 0.037459181, 0.033821651, 0.030373728, 0.027120092, 0.02406396, 0.02120712, 0.018549973, 0.016091597, 0.013829809, 0.011761239, 0.0098814158, 0.008184854, 0.006665149, 0.005315076, 0.0041266922, 0.0030914414], [0.018064449, 0.022040342, 0.026325013, 0.030905288, 0.035764398, 0.040881982, 0.046234148, 0.051793558, 0.057529559, 0.063408356, 0.069393216, 0.075444716, 0.081521022, 0.087578202, 0.093570567, 0.099451049, 0.10517159, 0.11068356, 0.11593818, 0.12088699, 0.12548227, 0.12967752, 0.1334279, 0.13669069, 0.13942569, 0.1415957, 0.14316686, 0.14410905, 0.14439626, 0.14400686, 0.14292389, 0.1411353, 0.13863416, 0.13541876, 0.13149274, 0.12686516, 0.12155045, 0.1155684, 0.10894403, 0.10170748, 0.093893752, 0.08554251, 0.076697768, 0.067407559, 0.057723559, 0.04770068, 0.037396627, 0.026871428, 0.016186944, 0.0054063557, -0.0054063557, -0.016186944, -0.026871428, -0.037396627, -0.04770068, 
-0.057723559, -0.067407559, -0.076697768, -0.08554251, -0.093893752, -0.10170748, -0.10894403, -0.1155684, -0.12155045, -0.12686516, -0.13149274, -0.13541876, -0.13863416, -0.1411353, -0.14292389, -0.14400686, -0.14439626, -0.14410905, -0.14316686, -0.1415957, -0.13942569, -0.13669069, -0.1334279, -0.12967752, -0.12548227, -0.12088699, -0.11593818, -0.11068356, -0.10517159, -0.099451049, -0.093570567, -0.087578202, -0.081521022, -0.075444716, -0.069393216, -0.063408356, -0.057529559, -0.051793558, -0.046234148, -0.040881982, -0.035764398, -0.030905288, -0.026325013, -0.022040342, -0.018064449], [0.064817553, 0.072567801, 0.080292992, 0.087918235, 0.095367076, 0.10256232, 0.10942687, 0.1158846, 0.12186124, 0.12728523, 0.13208858, 0.13620771, 0.13958427, 0.14216587, 0.14390678, 0.14476863, 0.1447209, 0.14374148, 0.14181704, 0.13894336, 0.13512554, 0.13037812, 0.1247251, 0.11819984, 0.11084487, 0.10271159, 0.093859853, 0.084357497, 0.074279719, 0.063708406, 0.052731374, 0.041441525, 0.029935953, 0.018314987, 0.0066811877, -0.0048616765, -0.016209689, -0.027259848, -0.037911124, -0.048065512, -0.05762905, -0.066512804, -0.0746338, -0.081915903, -0.088290621, -0.09369783, -0.098086416, -0.10141482, -0.10365146, -0.10477512, -0.10477512, -0.10365146, -0.10141482, -0.098086416, -0.09369783, -0.088290621, -0.081915903, -0.0746338, -0.066512804, -0.05762905, -0.048065512, -0.037911124, -0.027259848, -0.016209689, -0.0048616765, 0.0066811877, 0.018314987, 0.029935953, 0.041441525, 0.052731374, 0.063708406, 0.074279719, 0.084357497, 0.093859853, 0.10271159, 0.11084487, 0.11819984, 0.1247251, 0.13037812, 0.13512554, 0.13894336, 0.14181704, 0.14374148, 0.1447209, 0.14476863, 0.14390678, 0.14216587, 0.13958427, 0.13620771, 0.13208858, 0.12728523, 0.12186124, 0.1158846, 0.10942687, 0.10256232, 0.095367076, 0.087918235, 0.080292992, 0.072567801, 0.064817553], [0.14985551, 0.15512305, 0.15931467, 0.16236806, 0.16423291, 0.16487165, 0.16426009, 0.1623879, 0.1592589, 0.15489114, 0.14931693, 0.14258255, 0.13474785, 0.1258857, 0.11608124, 0.10543095, 0.094041635, 0.082029213, 0.069517411, 0.056636348, 0.043521028, 0.030309756, 0.017142511, 0.0041592774, -0.0085016282, -0.020705223, -0.032321494, -0.043226982, -0.053306291, -0.062453515, -0.070573544, -0.077583253, -0.083412547, -0.088005244, -0.091319802, -0.093329861, -0.094024602, -0.093408915, -0.091503383, -0.08834406, -0.08398207, -0.078483012, -0.071926192, -0.064403681, -0.056019215, -0.046886954, -0.037130106, -0.026879442, -0.016271713, -0.005448, 0.005448, 0.016271713, 0.026879442, 0.037130106, 0.046886954, 0.056019215, 0.064403681, 0.071926192, 0.078483012, 0.08398207, 0.08834406, 0.091503383, 0.093408915, 0.094024602, 0.093329861, 0.091319802, 0.088005244, 0.083412547, 0.077583253, 0.070573544, 0.062453515, 0.053306291, 0.043226982, 0.032321494, 0.020705223, 0.0085016282, -0.0041592774, -0.017142511, -0.030309756, -0.043521028, -0.056636348, -0.069517411, -0.082029213, -0.094041635, -0.10543095, -0.11608124, -0.1258857, -0.13474785, -0.14258255, -0.14931693, -0.15489114, -0.1592589, -0.1623879, -0.16426009, -0.16487165, -0.16423291, -0.16236806, -0.15931467, -0.15512305, -0.14985551]], [0.999943140, 0.997571533, 0.959465463, 0.721862496]), # noqa: E501 +} + + +class TestDPSS: + + def test_basic(self): + # Test against hardcoded data + for k, v in dpss_data.items(): + win, ratios = windows.dpss(*k, return_ratios=True) + assert_allclose(win, v[0], atol=1e-7, err_msg=k) + assert_allclose(ratios, v[1], rtol=1e-5, atol=1e-7, err_msg=k) + + def 
test_unity(self): + # Test unity value handling (gh-2221) + for M in range(1, 21): + # corrected w/approximation (default) + win = windows.dpss(M, M / 2.1) + expected = M % 2 # one for odd, none for even + assert_equal(np.isclose(win, 1.).sum(), expected, + err_msg=f'{win}') + # corrected w/subsample delay (slower) + win_sub = windows.dpss(M, M / 2.1, norm='subsample') + if M > 2: + # @M=2 the subsample doesn't do anything + assert_equal(np.isclose(win_sub, 1.).sum(), expected, + err_msg=f'{win_sub}') + assert_allclose(win, win_sub, rtol=0.03) # within 3% + # not the same, l2-norm + win_2 = windows.dpss(M, M / 2.1, norm=2) + expected = 1 if M == 1 else 0 + assert_equal(np.isclose(win_2, 1.).sum(), expected, + err_msg=f'{win_2}') + + def test_extremes(self): + # Test extremes of alpha + lam = windows.dpss(31, 6, 4, return_ratios=True)[1] + assert_array_almost_equal(lam, 1.) + lam = windows.dpss(31, 7, 4, return_ratios=True)[1] + assert_array_almost_equal(lam, 1.) + lam = windows.dpss(31, 8, 4, return_ratios=True)[1] + assert_array_almost_equal(lam, 1.) + + def test_degenerate(self): + # Test failures + assert_raises(ValueError, windows.dpss, 4, 1.5, -1) # Bad Kmax + assert_raises(ValueError, windows.dpss, 4, 1.5, -5) + assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1) + assert_raises(ValueError, windows.dpss, 3, 1.5, 3) # NW must be < N/2. + assert_raises(ValueError, windows.dpss, 3, -1, 3) # NW must be pos + assert_raises(ValueError, windows.dpss, 3, 0, 3) + assert_raises(ValueError, windows.dpss, -1, 1, 3) # negative M + + +class TestLanczos: + + def test_basic(self): + # Analytical results: + # sinc(x) = sinc(-x) + # sinc(pi) = 0, sinc(0) = 1 + # Hand computation on WolframAlpha: + # sinc(2 pi / 3) = 0.413496672 + # sinc(pi / 3) = 0.826993343 + # sinc(3 pi / 5) = 0.504551152 + # sinc(pi / 5) = 0.935489284 + assert_allclose(windows.lanczos(6, sym=False), + [0., 0.413496672, + 0.826993343, 1., 0.826993343, + 0.413496672], + atol=1e-9) + assert_allclose(windows.lanczos(6), + [0., 0.504551152, + 0.935489284, 0.935489284, + 0.504551152, 0.], + atol=1e-9) + assert_allclose(windows.lanczos(7, sym=True), + [0., 0.413496672, + 0.826993343, 1., 0.826993343, + 0.413496672, 0.], + atol=1e-9) + + def test_array_size(self): + for n in [0, 10, 11]: + assert_equal(len(windows.lanczos(n, sym=False)), n) + assert_equal(len(windows.lanczos(n, sym=True)), n) + + +class TestGetWindow: + + def test_boxcar(self): + w = windows.get_window('boxcar', 12) + assert_array_equal(w, np.ones_like(w)) + + # window is a tuple of len 1 + w = windows.get_window(('boxcar',), 16) + assert_array_equal(w, np.ones_like(w)) + + def test_cheb_odd(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + w = windows.get_window(('chebwin', -40), 53, fftbins=False) + assert_array_almost_equal(w, cheb_odd_true, decimal=4) + + def test_cheb_even(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + w = windows.get_window(('chebwin', 40), 54, fftbins=False) + assert_array_almost_equal(w, cheb_even_true, decimal=4) + + def test_dpss(self): + win1 = windows.get_window(('dpss', 3), 64, fftbins=False) + win2 = windows.dpss(64, 3) + assert_array_almost_equal(win1, win2, decimal=4) + + def test_kaiser_float(self): + win1 = windows.get_window(7.2, 64) + win2 = windows.kaiser(64, 7.2, False) + assert_allclose(win1, win2) + + def test_invalid_inputs(self): + # Window is not a float, tuple, or string + assert_raises(ValueError, windows.get_window, 
set('hann'), 8) + + # Unknown window type error + assert_raises(ValueError, windows.get_window, 'broken', 4) + + def test_array_as_window(self): + # GitHub issue 3603 + osfactor = 128 + sig = np.arange(128) + + win = windows.get_window(('kaiser', 8.0), osfactor // 2) + with assert_raises(ValueError, match='must have the same length'): + resample(sig, len(sig) * osfactor, window=win) + + def test_general_cosine(self): + assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4), + [0.4, 0.3, 1, 0.3]) + assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4, + fftbins=False), + [0.4, 0.55, 0.55, 0.4]) + + def test_general_hamming(self): + assert_allclose(get_window(('general_hamming', 0.7), 5), + [0.4, 0.6072949, 0.9427051, 0.9427051, 0.6072949]) + assert_allclose(get_window(('general_hamming', 0.7), 5, fftbins=False), + [0.4, 0.7, 1.0, 0.7, 0.4]) + + def test_lanczos(self): + assert_allclose(get_window('lanczos', 6), + [0., 0.413496672, 0.826993343, 1., 0.826993343, + 0.413496672], atol=1e-9) + assert_allclose(get_window('lanczos', 6, fftbins=False), + [0., 0.504551152, 0.935489284, 0.935489284, + 0.504551152, 0.], atol=1e-9) + assert_allclose(get_window('lanczos', 6), get_window('sinc', 6)) + + +def test_windowfunc_basics(): + for window_name, params in window_funcs: + window = getattr(windows, window_name) + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + # Check symmetry for odd and even lengths + w1 = window(8, *params, sym=True) + w2 = window(7, *params, sym=False) + assert_array_almost_equal(w1[:-1], w2) + + w1 = window(9, *params, sym=True) + w2 = window(8, *params, sym=False) + assert_array_almost_equal(w1[:-1], w2) + + # Check that functions run and output lengths are correct + assert_equal(len(window(6, *params, sym=True)), 6) + assert_equal(len(window(6, *params, sym=False)), 6) + assert_equal(len(window(7, *params, sym=True)), 7) + assert_equal(len(window(7, *params, sym=False)), 7) + + # Check invalid lengths + assert_raises(ValueError, window, 5.5, *params) + assert_raises(ValueError, window, -7, *params) + + # Check degenerate cases + assert_array_equal(window(0, *params, sym=True), []) + assert_array_equal(window(0, *params, sym=False), []) + assert_array_equal(window(1, *params, sym=True), [1]) + assert_array_equal(window(1, *params, sym=False), [1]) + + # Check dtype + assert_(window(0, *params, sym=True).dtype == 'float') + assert_(window(0, *params, sym=False).dtype == 'float') + assert_(window(1, *params, sym=True).dtype == 'float') + assert_(window(1, *params, sym=False).dtype == 'float') + assert_(window(6, *params, sym=True).dtype == 'float') + assert_(window(6, *params, sym=False).dtype == 'float') + + # Check normalization + assert_array_less(window(10, *params, sym=True), 1.01) + assert_array_less(window(10, *params, sym=False), 1.01) + assert_array_less(window(9, *params, sym=True), 1.01) + assert_array_less(window(9, *params, sym=False), 1.01) + + # Check that DFT-even spectrum is purely real for odd and even + assert_allclose(fft(window(10, *params, sym=False)).imag, + 0, atol=1e-14) + assert_allclose(fft(window(11, *params, sym=False)).imag, + 0, atol=1e-14) + + +def test_needs_params(): + for winstr in ['kaiser', 'ksr', 'kaiser_bessel_derived', 'kbd', + 'gaussian', 'gauss', 'gss', + 'general gaussian', 'general_gaussian', + 'general gauss', 'general_gauss', 'ggs', + 'dss', 'dpss', 'general cosine', 'general_cosine', + 'chebwin', 'cheb', 'general hamming', 'general_hamming', + ]: + 
assert_raises(ValueError, get_window, winstr, 7) + + +def test_not_needs_params(): + for winstr in ['barthann', + 'bartlett', + 'blackman', + 'blackmanharris', + 'bohman', + 'boxcar', + 'cosine', + 'flattop', + 'hamming', + 'nuttall', + 'parzen', + 'taylor', + 'exponential', + 'poisson', + 'tukey', + 'tuk', + 'triangle', + 'lanczos', + 'sinc', + ]: + win = get_window(winstr, 7) + assert_equal(len(win), 7) + + +def test_symmetric(): + + for win in [windows.lanczos]: + # Even sampling points + w = win(4096) + error = np.max(np.abs(w-np.flip(w))) + assert_equal(error, 0.0) + + # Odd sampling points + w = win(4097) + error = np.max(np.abs(w-np.flip(w))) + assert_equal(error, 0.0) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/waveforms.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/waveforms.py new file mode 100644 index 0000000000000000000000000000000000000000..30e71348d04276a66470a4053d97cefc60f7136e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/waveforms.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', + 'unit_impulse', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="waveforms", + private_modules=["_waveforms"], all=__all__, + attribute=name) diff --git a/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/wavelets.py b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/wavelets.py new file mode 100644 index 0000000000000000000000000000000000000000..fc897a2483536df7e995faaa29af621e25fe38c7 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/scipy/signal/wavelets.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="wavelets", + private_modules=["_wavelets"], all=__all__, + attribute=name)
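
The `waveforms.py` and `wavelets.py` shims above both rely on a PEP 562 module-level `__getattr__` to forward attribute access to the private implementation module via `scipy._lib.deprecation._sub_module_deprecation`. As a reading aid, here is a minimal, hand-rolled sketch of that pattern without the private helper. It is not the SciPy implementation: the warning wording, the `AttributeError` handling for unknown names, and the direct use of `importlib` are assumptions made for illustration; only the forwarding target `scipy.signal._waveforms` and the public names come from the diff above.

# Sketch of a PEP 562 deprecation shim (illustrative only, not SciPy's
# `_sub_module_deprecation`). Attribute access on the deprecated module
# emits a DeprecationWarning and then forwards to the private module.
import importlib
import warnings

__all__ = ['sawtooth', 'square']  # public names still reachable through the shim


def __dir__():
    return __all__


def __getattr__(name):  # called only for names not found in the module dict
    if name not in __all__:
        # Assumed behaviour: unknown names fail as usual.
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    warnings.warn(
        f"{__name__}.{name} is deprecated; import it from the public "
        "scipy.signal namespace instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    # Forward the lookup to the private implementation module.
    impl = importlib.import_module("scipy.signal._waveforms")
    return getattr(impl, name)

Saved as a module in place of the deprecated file, importing a name from it would warn once and then return the object from the private module, which is the behaviour the two shims above delegate to `_sub_module_deprecation`.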