ZTWHHH commited on
Commit
54e75e2
·
verified ·
1 Parent(s): a664d8b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__init__.py +27 -0
  2. infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/__init__.cpython-310.pyc +0 -0
  3. infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/_differentiate.cpython-310.pyc +0 -0
  4. infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/_differentiate.py +1129 -0
  5. infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__init__.py +0 -0
  6. infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  7. infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/test_differentiate.cpython-310.pyc +0 -0
  8. infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/test_differentiate.py +695 -0
  9. infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/__init__.cpython-310.pyc +0 -0
  10. infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/_arffread.cpython-310.pyc +0 -0
  11. infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff +11 -0
  12. infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff +0 -0
  13. infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff +15 -0
  14. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat +0 -0
  15. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat +0 -0
  16. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat +0 -0
  17. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat +0 -0
  18. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat +0 -0
  19. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat +0 -0
  20. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat +0 -0
  21. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsimplecell.mat +0 -0
  22. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat +0 -0
  23. infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat +0 -0
  24. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/__init__.py +327 -0
  25. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_arraytools.py +264 -0
  26. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_czt.py +575 -0
  27. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_filter_design.py +0 -0
  28. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py +1286 -0
  29. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py +533 -0
  30. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_ltisys.py +0 -0
  31. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py +139 -0
  32. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so +0 -0
  33. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_peak_finding.py +1310 -0
  34. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py +357 -0
  35. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_short_time_fft.py +1710 -0
  36. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_signaltools.py +0 -0
  37. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so +0 -0
  38. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spectral_py.py +2291 -0
  39. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so +0 -0
  40. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.pyi +34 -0
  41. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline_filters.py +808 -0
  42. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_upfirdn.py +216 -0
  43. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_waveforms.py +696 -0
  44. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_wavelets.py +29 -0
  45. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/bsplines.py +21 -0
  46. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/filter_design.py +28 -0
  47. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py +20 -0
  48. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/lti_conversion.py +20 -0
  49. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/ltisys.py +25 -0
  50. infer_4_30_0/lib/python3.10/site-packages/scipy/signal/signaltools.py +27 -0
infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ==============================================================
3
+ Finite Difference Differentiation (:mod:`scipy.differentiate`)
4
+ ==============================================================
5
+
6
+ .. currentmodule:: scipy.differentiate
7
+
8
+ SciPy ``differentiate`` provides functions for performing finite difference
9
+ numerical differentiation of black-box functions.
10
+
11
+ .. autosummary::
12
+ :toctree: generated/
13
+
14
+ derivative
15
+ jacobian
16
+ hessian
17
+
18
+ """
19
+
20
+
21
+ from ._differentiate import *
22
+
23
+ __all__ = ['derivative', 'jacobian', 'hessian']
24
+
25
+ from scipy._lib._testutils import PytestTester
26
+ test = PytestTester(__name__)
27
+ del PytestTester
infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (811 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/__pycache__/_differentiate.cpython-310.pyc ADDED
Binary file (40.1 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/_differentiate.py ADDED
@@ -0,0 +1,1129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: disable-error-code="attr-defined"
2
+ import warnings
3
+ import numpy as np
4
+ import scipy._lib._elementwise_iterative_method as eim
5
+ from scipy._lib._util import _RichResult
6
+ from scipy._lib._array_api import array_namespace, xp_sign, xp_copy, xp_take_along_axis
7
+
8
+ _EERRORINCREASE = -1 # used in derivative
9
+
10
+ def _derivative_iv(f, x, args, tolerances, maxiter, order, initial_step,
11
+ step_factor, step_direction, preserve_shape, callback):
12
+ # Input validation for `derivative`
13
+ xp = array_namespace(x)
14
+
15
+ if not callable(f):
16
+ raise ValueError('`f` must be callable.')
17
+
18
+ if not np.iterable(args):
19
+ args = (args,)
20
+
21
+ tolerances = {} if tolerances is None else tolerances
22
+ atol = tolerances.get('atol', None)
23
+ rtol = tolerances.get('rtol', None)
24
+
25
+ # tolerances are floats, not arrays; OK to use NumPy
26
+ message = 'Tolerances and step parameters must be non-negative scalars.'
27
+ tols = np.asarray([atol if atol is not None else 1,
28
+ rtol if rtol is not None else 1,
29
+ step_factor])
30
+ if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0)
31
+ or np.any(np.isnan(tols)) or tols.shape != (3,)):
32
+ raise ValueError(message)
33
+ step_factor = float(tols[2])
34
+
35
+ maxiter_int = int(maxiter)
36
+ if maxiter != maxiter_int or maxiter <= 0:
37
+ raise ValueError('`maxiter` must be a positive integer.')
38
+
39
+ order_int = int(order)
40
+ if order_int != order or order <= 0:
41
+ raise ValueError('`order` must be a positive integer.')
42
+
43
+ step_direction = xp.asarray(step_direction)
44
+ initial_step = xp.asarray(initial_step)
45
+ temp = xp.broadcast_arrays(x, step_direction, initial_step)
46
+ x, step_direction, initial_step = temp
47
+
48
+ message = '`preserve_shape` must be True or False.'
49
+ if preserve_shape not in {True, False}:
50
+ raise ValueError(message)
51
+
52
+ if callback is not None and not callable(callback):
53
+ raise ValueError('`callback` must be callable.')
54
+
55
+ return (f, x, args, atol, rtol, maxiter_int, order_int, initial_step,
56
+ step_factor, step_direction, preserve_shape, callback)
57
+
58
+
59
+ def derivative(f, x, *, args=(), tolerances=None, maxiter=10,
60
+ order=8, initial_step=0.5, step_factor=2.0,
61
+ step_direction=0, preserve_shape=False, callback=None):
62
+ """Evaluate the derivative of a elementwise, real scalar function numerically.
63
+
64
+ For each element of the output of `f`, `derivative` approximates the first
65
+ derivative of `f` at the corresponding element of `x` using finite difference
66
+ differentiation.
67
+
68
+ This function works elementwise when `x`, `step_direction`, and `args` contain
69
+ (broadcastable) arrays.
70
+
71
+ Parameters
72
+ ----------
73
+ f : callable
74
+ The function whose derivative is desired. The signature must be::
75
+
76
+ f(xi: ndarray, *argsi) -> ndarray
77
+
78
+ where each element of ``xi`` is a finite real number and ``argsi`` is a tuple,
79
+ which may contain an arbitrary number of arrays that are broadcastable with
80
+ ``xi``. `f` must be an elementwise function: each scalar element ``f(xi)[j]``
81
+ must equal ``f(xi[j])`` for valid indices ``j``. It must not mutate the array
82
+ ``xi`` or the arrays in ``argsi``.
83
+ x : float array_like
84
+ Abscissae at which to evaluate the derivative. Must be broadcastable with
85
+ `args` and `step_direction`.
86
+ args : tuple of array_like, optional
87
+ Additional positional array arguments to be passed to `f`. Arrays
88
+ must be broadcastable with one another and the arrays of `init`.
89
+ If the callable for which the root is desired requires arguments that are
90
+ not broadcastable with `x`, wrap that callable with `f` such that `f`
91
+ accepts only `x` and broadcastable ``*args``.
92
+ tolerances : dictionary of floats, optional
93
+ Absolute and relative tolerances. Valid keys of the dictionary are:
94
+
95
+ - ``atol`` - absolute tolerance on the derivative
96
+ - ``rtol`` - relative tolerance on the derivative
97
+
98
+ Iteration will stop when ``res.error < atol + rtol * abs(res.df)``. The default
99
+ `atol` is the smallest normal number of the appropriate dtype, and
100
+ the default `rtol` is the square root of the precision of the
101
+ appropriate dtype.
102
+ order : int, default: 8
103
+ The (positive integer) order of the finite difference formula to be
104
+ used. Odd integers will be rounded up to the next even integer.
105
+ initial_step : float array_like, default: 0.5
106
+ The (absolute) initial step size for the finite difference derivative
107
+ approximation.
108
+ step_factor : float, default: 2.0
109
+ The factor by which the step size is *reduced* in each iteration; i.e.
110
+ the step size in iteration 1 is ``initial_step/step_factor``. If
111
+ ``step_factor < 1``, subsequent steps will be greater than the initial
112
+ step; this may be useful if steps smaller than some threshold are
113
+ undesirable (e.g. due to subtractive cancellation error).
114
+ maxiter : int, default: 10
115
+ The maximum number of iterations of the algorithm to perform. See
116
+ Notes.
117
+ step_direction : integer array_like
118
+ An array representing the direction of the finite difference steps (for
119
+ use when `x` lies near to the boundary of the domain of the function.)
120
+ Must be broadcastable with `x` and all `args`.
121
+ Where 0 (default), central differences are used; where negative (e.g.
122
+ -1), steps are non-positive; and where positive (e.g. 1), all steps are
123
+ non-negative.
124
+ preserve_shape : bool, default: False
125
+ In the following, "arguments of `f`" refers to the array ``xi`` and
126
+ any arrays within ``argsi``. Let ``shape`` be the broadcasted shape
127
+ of `x` and all elements of `args` (which is conceptually
128
+ distinct from ``xi` and ``argsi`` passed into `f`).
129
+
130
+ - When ``preserve_shape=False`` (default), `f` must accept arguments
131
+ of *any* broadcastable shapes.
132
+
133
+ - When ``preserve_shape=True``, `f` must accept arguments of shape
134
+ ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of
135
+ abscissae at which the function is being evaluated.
136
+
137
+ In either case, for each scalar element ``xi[j]`` within ``xi``, the array
138
+ returned by `f` must include the scalar ``f(xi[j])`` at the same index.
139
+ Consequently, the shape of the output is always the shape of the input
140
+ ``xi``.
141
+
142
+ See Examples.
143
+ callback : callable, optional
144
+ An optional user-supplied function to be called before the first
145
+ iteration and after each iteration.
146
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
147
+ similar to that returned by `derivative` (but containing the current
148
+ iterate's values of all variables). If `callback` raises a
149
+ ``StopIteration``, the algorithm will terminate immediately and
150
+ `derivative` will return a result. `callback` must not mutate
151
+ `res` or its attributes.
152
+
153
+ Returns
154
+ -------
155
+ res : _RichResult
156
+ An object similar to an instance of `scipy.optimize.OptimizeResult` with the
157
+ following attributes. The descriptions are written as though the values will
158
+ be scalars; however, if `f` returns an array, the outputs will be
159
+ arrays of the same shape.
160
+
161
+ success : bool array
162
+ ``True`` where the algorithm terminated successfully (status ``0``);
163
+ ``False`` otherwise.
164
+ status : int array
165
+ An integer representing the exit status of the algorithm.
166
+
167
+ - ``0`` : The algorithm converged to the specified tolerances.
168
+ - ``-1`` : The error estimate increased, so iteration was terminated.
169
+ - ``-2`` : The maximum number of iterations was reached.
170
+ - ``-3`` : A non-finite value was encountered.
171
+ - ``-4`` : Iteration was terminated by `callback`.
172
+ - ``1`` : The algorithm is proceeding normally (in `callback` only).
173
+
174
+ df : float array
175
+ The derivative of `f` at `x`, if the algorithm terminated
176
+ successfully.
177
+ error : float array
178
+ An estimate of the error: the magnitude of the difference between
179
+ the current estimate of the derivative and the estimate in the
180
+ previous iteration.
181
+ nit : int array
182
+ The number of iterations of the algorithm that were performed.
183
+ nfev : int array
184
+ The number of points at which `f` was evaluated.
185
+ x : float array
186
+ The value at which the derivative of `f` was evaluated
187
+ (after broadcasting with `args` and `step_direction`).
188
+
189
+ See Also
190
+ --------
191
+ jacobian, hessian
192
+
193
+ Notes
194
+ -----
195
+ The implementation was inspired by jacobi [1]_, numdifftools [2]_, and
196
+ DERIVEST [3]_, but the implementation follows the theory of Taylor series
197
+ more straightforwardly (and arguably naively so).
198
+ In the first iteration, the derivative is estimated using a finite
199
+ difference formula of order `order` with maximum step size `initial_step`.
200
+ Each subsequent iteration, the maximum step size is reduced by
201
+ `step_factor`, and the derivative is estimated again until a termination
202
+ condition is reached. The error estimate is the magnitude of the difference
203
+ between the current derivative approximation and that of the previous
204
+ iteration.
205
+
206
+ The stencils of the finite difference formulae are designed such that
207
+ abscissae are "nested": after `f` is evaluated at ``order + 1``
208
+ points in the first iteration, `f` is evaluated at only two new points
209
+ in each subsequent iteration; ``order - 1`` previously evaluated function
210
+ values required by the finite difference formula are reused, and two
211
+ function values (evaluations at the points furthest from `x`) are unused.
212
+
213
+ Step sizes are absolute. When the step size is small relative to the
214
+ magnitude of `x`, precision is lost; for example, if `x` is ``1e20``, the
215
+ default initial step size of ``0.5`` cannot be resolved. Accordingly,
216
+ consider using larger initial step sizes for large magnitudes of `x`.
217
+
218
+ The default tolerances are challenging to satisfy at points where the
219
+ true derivative is exactly zero. If the derivative may be exactly zero,
220
+ consider specifying an absolute tolerance (e.g. ``atol=1e-12``) to
221
+ improve convergence.
222
+
223
+ References
224
+ ----------
225
+ .. [1] Hans Dembinski (@HDembinski). jacobi.
226
+ https://github.com/HDembinski/jacobi
227
+ .. [2] Per A. Brodtkorb and John D'Errico. numdifftools.
228
+ https://numdifftools.readthedocs.io/en/latest/
229
+ .. [3] John D'Errico. DERIVEST: Adaptive Robust Numerical Differentiation.
230
+ https://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation
231
+ .. [4] Numerical Differentition. Wikipedia.
232
+ https://en.wikipedia.org/wiki/Numerical_differentiation
233
+
234
+ Examples
235
+ --------
236
+ Evaluate the derivative of ``np.exp`` at several points ``x``.
237
+
238
+ >>> import numpy as np
239
+ >>> from scipy.differentiate import derivative
240
+ >>> f = np.exp
241
+ >>> df = np.exp # true derivative
242
+ >>> x = np.linspace(1, 2, 5)
243
+ >>> res = derivative(f, x)
244
+ >>> res.df # approximation of the derivative
245
+ array([2.71828183, 3.49034296, 4.48168907, 5.75460268, 7.3890561 ])
246
+ >>> res.error # estimate of the error
247
+ array([7.13740178e-12, 9.16600129e-12, 1.17594823e-11, 1.51061386e-11,
248
+ 1.94262384e-11])
249
+ >>> abs(res.df - df(x)) # true error
250
+ array([2.53130850e-14, 3.55271368e-14, 5.77315973e-14, 5.59552404e-14,
251
+ 6.92779167e-14])
252
+
253
+ Show the convergence of the approximation as the step size is reduced.
254
+ Each iteration, the step size is reduced by `step_factor`, so for
255
+ sufficiently small initial step, each iteration reduces the error by a
256
+ factor of ``1/step_factor**order`` until finite precision arithmetic
257
+ inhibits further improvement.
258
+
259
+ >>> import matplotlib.pyplot as plt
260
+ >>> iter = list(range(1, 12)) # maximum iterations
261
+ >>> hfac = 2 # step size reduction per iteration
262
+ >>> hdir = [-1, 0, 1] # compare left-, central-, and right- steps
263
+ >>> order = 4 # order of differentiation formula
264
+ >>> x = 1
265
+ >>> ref = df(x)
266
+ >>> errors = [] # true error
267
+ >>> for i in iter:
268
+ ... res = derivative(f, x, maxiter=i, step_factor=hfac,
269
+ ... step_direction=hdir, order=order,
270
+ ... # prevent early termination
271
+ ... tolerances=dict(atol=0, rtol=0))
272
+ ... errors.append(abs(res.df - ref))
273
+ >>> errors = np.array(errors)
274
+ >>> plt.semilogy(iter, errors[:, 0], label='left differences')
275
+ >>> plt.semilogy(iter, errors[:, 1], label='central differences')
276
+ >>> plt.semilogy(iter, errors[:, 2], label='right differences')
277
+ >>> plt.xlabel('iteration')
278
+ >>> plt.ylabel('error')
279
+ >>> plt.legend()
280
+ >>> plt.show()
281
+ >>> (errors[1, 1] / errors[0, 1], 1 / hfac**order)
282
+ (0.06215223140159822, 0.0625)
283
+
284
+ The implementation is vectorized over `x`, `step_direction`, and `args`.
285
+ The function is evaluated once before the first iteration to perform input
286
+ validation and standardization, and once per iteration thereafter.
287
+
288
+ >>> def f(x, p):
289
+ ... f.nit += 1
290
+ ... return x**p
291
+ >>> f.nit = 0
292
+ >>> def df(x, p):
293
+ ... return p*x**(p-1)
294
+ >>> x = np.arange(1, 5)
295
+ >>> p = np.arange(1, 6).reshape((-1, 1))
296
+ >>> hdir = np.arange(-1, 2).reshape((-1, 1, 1))
297
+ >>> res = derivative(f, x, args=(p,), step_direction=hdir, maxiter=1)
298
+ >>> np.allclose(res.df, df(x, p))
299
+ True
300
+ >>> res.df.shape
301
+ (3, 5, 4)
302
+ >>> f.nit
303
+ 2
304
+
305
+ By default, `preserve_shape` is False, and therefore the callable
306
+ `f` may be called with arrays of any broadcastable shapes.
307
+ For example:
308
+
309
+ >>> shapes = []
310
+ >>> def f(x, c):
311
+ ... shape = np.broadcast_shapes(x.shape, c.shape)
312
+ ... shapes.append(shape)
313
+ ... return np.sin(c*x)
314
+ >>>
315
+ >>> c = [1, 5, 10, 20]
316
+ >>> res = derivative(f, 0, args=(c,))
317
+ >>> shapes
318
+ [(4,), (4, 8), (4, 2), (3, 2), (2, 2), (1, 2)]
319
+
320
+ To understand where these shapes are coming from - and to better
321
+ understand how `derivative` computes accurate results - note that
322
+ higher values of ``c`` correspond with higher frequency sinusoids.
323
+ The higher frequency sinusoids make the function's derivative change
324
+ faster, so more function evaluations are required to achieve the target
325
+ accuracy:
326
+
327
+ >>> res.nfev
328
+ array([11, 13, 15, 17], dtype=int32)
329
+
330
+ The initial ``shape``, ``(4,)``, corresponds with evaluating the
331
+ function at a single abscissa and all four frequencies; this is used
332
+ for input validation and to determine the size and dtype of the arrays
333
+ that store results. The next shape corresponds with evaluating the
334
+ function at an initial grid of abscissae and all four frequencies.
335
+ Successive calls to the function evaluate the function at two more
336
+ abscissae, increasing the effective order of the approximation by two.
337
+ However, in later function evaluations, the function is evaluated at
338
+ fewer frequencies because the corresponding derivative has already
339
+ converged to the required tolerance. This saves function evaluations to
340
+ improve performance, but it requires the function to accept arguments of
341
+ any shape.
342
+
343
+ "Vector-valued" functions are unlikely to satisfy this requirement.
344
+ For example, consider
345
+
346
+ >>> def f(x):
347
+ ... return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2]
348
+
349
+ This integrand is not compatible with `derivative` as written; for instance,
350
+ the shape of the output will not be the same as the shape of ``x``. Such a
351
+ function *could* be converted to a compatible form with the introduction of
352
+ additional parameters, but this would be inconvenient. In such cases,
353
+ a simpler solution would be to use `preserve_shape`.
354
+
355
+ >>> shapes = []
356
+ >>> def f(x):
357
+ ... shapes.append(x.shape)
358
+ ... x0, x1, x2, x3 = x
359
+ ... return [x0, np.sin(3*x1), x2+np.sin(10*x2), np.sin(20*x3)*(x3-1)**2]
360
+ >>>
361
+ >>> x = np.zeros(4)
362
+ >>> res = derivative(f, x, preserve_shape=True)
363
+ >>> shapes
364
+ [(4,), (4, 8), (4, 2), (4, 2), (4, 2), (4, 2)]
365
+
366
+ Here, the shape of ``x`` is ``(4,)``. With ``preserve_shape=True``, the
367
+ function may be called with argument ``x`` of shape ``(4,)`` or ``(4, n)``,
368
+ and this is what we observe.
369
+
370
+ """
371
+ # TODO (followup):
372
+ # - investigate behavior at saddle points
373
+ # - multivariate functions?
374
+ # - relative steps?
375
+ # - show example of `np.vectorize`
376
+
377
+ res = _derivative_iv(f, x, args, tolerances, maxiter, order, initial_step,
378
+ step_factor, step_direction, preserve_shape, callback)
379
+ (func, x, args, atol, rtol, maxiter, order,
380
+ h0, fac, hdir, preserve_shape, callback) = res
381
+
382
+ # Initialization
383
+ # Since f(x) (no step) is not needed for central differences, it may be
384
+ # possible to eliminate this function evaluation. However, it's useful for
385
+ # input validation and standardization, and everything else is designed to
386
+ # reduce function calls, so let's keep it simple.
387
+ temp = eim._initialize(func, (x,), args, preserve_shape=preserve_shape)
388
+ func, xs, fs, args, shape, dtype, xp = temp
389
+
390
+ finfo = xp.finfo(dtype)
391
+ atol = finfo.smallest_normal if atol is None else atol
392
+ rtol = finfo.eps**0.5 if rtol is None else rtol # keep same as `hessian`
393
+
394
+ x, f = xs[0], fs[0]
395
+ df = xp.full_like(f, xp.nan)
396
+
397
+ # Ideally we'd broadcast the shape of `hdir` in `_elementwise_algo_init`, but
398
+ # it's simpler to do it here than to generalize `_elementwise_algo_init` further.
399
+ # `hdir` and `x` are already broadcasted in `_derivative_iv`, so we know
400
+ # that `hdir` can be broadcasted to the final shape. Same with `h0`.
401
+ hdir = xp.broadcast_to(hdir, shape)
402
+ hdir = xp.reshape(hdir, (-1,))
403
+ hdir = xp.astype(xp_sign(hdir), dtype)
404
+ h0 = xp.broadcast_to(h0, shape)
405
+ h0 = xp.reshape(h0, (-1,))
406
+ h0 = xp.astype(h0, dtype)
407
+ h0[h0 <= 0] = xp.asarray(xp.nan, dtype=dtype)
408
+
409
+ status = xp.full_like(x, eim._EINPROGRESS, dtype=xp.int32) # in progress
410
+ nit, nfev = 0, 1 # one function evaluations performed above
411
+ # Boolean indices of left, central, right, and (all) one-sided steps
412
+ il = hdir < 0
413
+ ic = hdir == 0
414
+ ir = hdir > 0
415
+ io = il | ir
416
+
417
+ # Most of these attributes are reasonably obvious, but:
418
+ # - `fs` holds all the function values of all active `x`. The zeroth
419
+ # axis corresponds with active points `x`, the first axis corresponds
420
+ # with the different steps (in the order described in
421
+ # `_derivative_weights`).
422
+ # - `terms` (which could probably use a better name) is half the `order`,
423
+ # which is always even.
424
+ work = _RichResult(x=x, df=df, fs=f[:, xp.newaxis], error=xp.nan, h=h0,
425
+ df_last=xp.nan, error_last=xp.nan, fac=fac,
426
+ atol=atol, rtol=rtol, nit=nit, nfev=nfev,
427
+ status=status, dtype=dtype, terms=(order+1)//2,
428
+ hdir=hdir, il=il, ic=ic, ir=ir, io=io,
429
+ # Store the weights in an object so they can't get compressed
430
+ # Using RichResult to allow dot notation, but a dict would work
431
+ diff_state=_RichResult(central=[], right=[], fac=None))
432
+
433
+ # This is the correspondence between terms in the `work` object and the
434
+ # final result. In this case, the mapping is trivial. Note that `success`
435
+ # is prepended automatically.
436
+ res_work_pairs = [('status', 'status'), ('df', 'df'), ('error', 'error'),
437
+ ('nit', 'nit'), ('nfev', 'nfev'), ('x', 'x')]
438
+
439
+ def pre_func_eval(work):
440
+ """Determine the abscissae at which the function needs to be evaluated.
441
+
442
+ See `_derivative_weights` for a description of the stencil (pattern
443
+ of the abscissae).
444
+
445
+ In the first iteration, there is only one stored function value in
446
+ `work.fs`, `f(x)`, so we need to evaluate at `order` new points. In
447
+ subsequent iterations, we evaluate at two new points. Note that
448
+ `work.x` is always flattened into a 1D array after broadcasting with
449
+ all `args`, so we add a new axis at the end and evaluate all point
450
+ in one call to the function.
451
+
452
+ For improvement:
453
+ - Consider measuring the step size actually taken, since ``(x + h) - x``
454
+ is not identically equal to `h` with floating point arithmetic.
455
+ - Adjust the step size automatically if `x` is too big to resolve the
456
+ step.
457
+ - We could probably save some work if there are no central difference
458
+ steps or no one-sided steps.
459
+ """
460
+ n = work.terms # half the order
461
+ h = work.h[:, xp.newaxis] # step size
462
+ c = work.fac # step reduction factor
463
+ d = c**0.5 # square root of step reduction factor (one-sided stencil)
464
+ # Note - no need to be careful about dtypes until we allocate `x_eval`
465
+
466
+ if work.nit == 0:
467
+ hc = h / c**xp.arange(n, dtype=work.dtype)
468
+ hc = xp.concat((-xp.flip(hc, axis=-1), hc), axis=-1)
469
+ else:
470
+ hc = xp.concat((-h, h), axis=-1) / c**(n-1)
471
+
472
+ if work.nit == 0:
473
+ hr = h / d**xp.arange(2*n, dtype=work.dtype)
474
+ else:
475
+ hr = xp.concat((h, h/d), axis=-1) / c**(n-1)
476
+
477
+ n_new = 2*n if work.nit == 0 else 2 # number of new abscissae
478
+ x_eval = xp.zeros((work.hdir.shape[0], n_new), dtype=work.dtype)
479
+ il, ic, ir = work.il, work.ic, work.ir
480
+ x_eval[ir] = work.x[ir][:, xp.newaxis] + hr[ir]
481
+ x_eval[ic] = work.x[ic][:, xp.newaxis] + hc[ic]
482
+ x_eval[il] = work.x[il][:, xp.newaxis] - hr[il]
483
+ return x_eval
484
+
485
+ def post_func_eval(x, f, work):
486
+ """ Estimate the derivative and error from the function evaluations
487
+
488
+ As in `pre_func_eval`: in the first iteration, there is only one stored
489
+ function value in `work.fs`, `f(x)`, so we need to add the `order` new
490
+ points. In subsequent iterations, we add two new points. The tricky
491
+ part is getting the order to match that of the weights, which is
492
+ described in `_derivative_weights`.
493
+
494
+ For improvement:
495
+ - Change the order of the weights (and steps in `pre_func_eval`) to
496
+ simplify `work_fc` concatenation and eliminate `fc` concatenation.
497
+ - It would be simple to do one-step Richardson extrapolation with `df`
498
+ and `df_last` to increase the order of the estimate and/or improve
499
+ the error estimate.
500
+ - Process the function evaluations in a more numerically favorable
501
+ way. For instance, combining the pairs of central difference evals
502
+ into a second-order approximation and using Richardson extrapolation
503
+ to produce a higher order approximation seemed to retain accuracy up
504
+ to very high order.
505
+ - Alternatively, we could use `polyfit` like Jacobi. An advantage of
506
+ fitting polynomial to more points than necessary is improved noise
507
+ tolerance.
508
+ """
509
+ n = work.terms
510
+ n_new = n if work.nit == 0 else 1
511
+ il, ic, io = work.il, work.ic, work.io
512
+
513
+ # Central difference
514
+ # `work_fc` is *all* the points at which the function has been evaluated
515
+ # `fc` is the points we're using *this iteration* to produce the estimate
516
+ work_fc = (f[ic][:, :n_new], work.fs[ic], f[ic][:, -n_new:])
517
+ work_fc = xp.concat(work_fc, axis=-1)
518
+ if work.nit == 0:
519
+ fc = work_fc
520
+ else:
521
+ fc = (work_fc[:, :n], work_fc[:, n:n+1], work_fc[:, -n:])
522
+ fc = xp.concat(fc, axis=-1)
523
+
524
+ # One-sided difference
525
+ work_fo = xp.concat((work.fs[io], f[io]), axis=-1)
526
+ if work.nit == 0:
527
+ fo = work_fo
528
+ else:
529
+ fo = xp.concat((work_fo[:, 0:1], work_fo[:, -2*n:]), axis=-1)
530
+
531
+ work.fs = xp.zeros((ic.shape[0], work.fs.shape[-1] + 2*n_new), dtype=work.dtype)
532
+ work.fs[ic] = work_fc
533
+ work.fs[io] = work_fo
534
+
535
+ wc, wo = _derivative_weights(work, n, xp)
536
+ work.df_last = xp.asarray(work.df, copy=True)
537
+ work.df[ic] = fc @ wc / work.h[ic]
538
+ work.df[io] = fo @ wo / work.h[io]
539
+ work.df[il] *= -1
540
+
541
+ work.h /= work.fac
542
+ work.error_last = work.error
543
+ # Simple error estimate - the difference in derivative estimates between
544
+ # this iteration and the last. This is typically conservative because if
545
+ # convergence has begin, the true error is much closer to the difference
546
+ # between the current estimate and the *next* error estimate. However,
547
+ # we could use Richarson extrapolation to produce an error estimate that
548
+ # is one order higher, and take the difference between that and
549
+ # `work.df` (which would just be constant factor that depends on `fac`.)
550
+ work.error = xp.abs(work.df - work.df_last)
551
+
552
+ def check_termination(work):
553
+ """Terminate due to convergence, non-finite values, or error increase"""
554
+ stop = xp.astype(xp.zeros_like(work.df), xp.bool)
555
+
556
+ i = work.error < work.atol + work.rtol*abs(work.df)
557
+ work.status[i] = eim._ECONVERGED
558
+ stop[i] = True
559
+
560
+ if work.nit > 0:
561
+ i = ~((xp.isfinite(work.x) & xp.isfinite(work.df)) | stop)
562
+ work.df[i], work.status[i] = xp.nan, eim._EVALUEERR
563
+ stop[i] = True
564
+
565
+ # With infinite precision, there is a step size below which
566
+ # all smaller step sizes will reduce the error. But in floating point
567
+ # arithmetic, catastrophic cancellation will begin to cause the error
568
+ # to increase again. This heuristic tries to avoid step sizes that are
569
+ # too small. There may be more theoretically sound approaches for
570
+ # detecting a step size that minimizes the total error, but this
571
+ # heuristic seems simple and effective.
572
+ i = (work.error > work.error_last*10) & ~stop
573
+ work.status[i] = _EERRORINCREASE
574
+ stop[i] = True
575
+
576
+ return stop
577
+
578
+ def post_termination_check(work):
579
+ return
580
+
581
+ def customize_result(res, shape):
582
+ return shape
583
+
584
+ return eim._loop(work, callback, shape, maxiter, func, args, dtype,
585
+ pre_func_eval, post_func_eval, check_termination,
586
+ post_termination_check, customize_result, res_work_pairs,
587
+ xp, preserve_shape)
588
+
589
+
590
+ def _derivative_weights(work, n, xp):
591
+ # This produces the weights of the finite difference formula for a given
592
+ # stencil. In experiments, use of a second-order central difference formula
593
+ # with Richardson extrapolation was more accurate numerically, but it was
594
+ # more complicated, and it would have become even more complicated when
595
+ # adding support for one-sided differences. However, now that all the
596
+ # function evaluation values are stored, they can be processed in whatever
597
+ # way is desired to produce the derivative estimate. We leave alternative
598
+ # approaches to future work. To be more self-contained, here is the theory
599
+ # for deriving the weights below.
600
+ #
601
+ # Recall that the Taylor expansion of a univariate, scalar-values function
602
+ # about a point `x` may be expressed as:
603
+ # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
604
+ # Suppose we evaluate f(x), f(x+h), and f(x-h). We have:
605
+ # f(x) = f(x)
606
+ # f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
607
+ # f(x - h) = f(x) - f'(x)*h + f''(x)/2!*h**2 + O(h**3)
608
+ # We can solve for weights `wi` such that:
609
+ # w1*f(x) = w1*(f(x))
610
+ # + w2*f(x + h) = w2*(f(x) + f'(x)*h + f''(x)/2!*h**2) + O(h**3)
611
+ # + w3*f(x - h) = w3*(f(x) - f'(x)*h + f''(x)/2!*h**2) + O(h**3)
612
+ # = 0 + f'(x)*h + 0 + O(h**3)
613
+ # Then
614
+ # f'(x) ~ (w1*f(x) + w2*f(x+h) + w3*f(x-h))/h
615
+ # is a finite difference derivative approximation with error O(h**2),
616
+ # and so it is said to be a "second-order" approximation. Under certain
617
+ # conditions (e.g. well-behaved function, `h` sufficiently small), the
618
+ # error in the approximation will decrease with h**2; that is, if `h` is
619
+ # reduced by a factor of 2, the error is reduced by a factor of 4.
620
+ #
621
+ # By default, we use eighth-order formulae. Our central-difference formula
622
+ # uses abscissae:
623
+ # x-h/c**3, x-h/c**2, x-h/c, x-h, x, x+h, x+h/c, x+h/c**2, x+h/c**3
624
+ # where `c` is the step factor. (Typically, the step factor is greater than
625
+ # one, so the outermost points - as written above - are actually closest to
626
+ # `x`.) This "stencil" is chosen so that each iteration, the step can be
627
+ # reduced by the factor `c`, and most of the function evaluations can be
628
+ # reused with the new step size. For example, in the next iteration, we
629
+ # will have:
630
+ # x-h/c**4, x-h/c**3, x-h/c**2, x-h/c, x, x+h/c, x+h/c**2, x+h/c**3, x+h/c**4
631
+ # We do not reuse `x-h` and `x+h` for the new derivative estimate.
632
+ # While this would increase the order of the formula and thus the
633
+ # theoretical convergence rate, it is also less stable numerically.
634
+ # (As noted above, there are other ways of processing the values that are
635
+ # more stable. Thus, even now we store `f(x-h)` and `f(x+h)` in `work.fs`
636
+ # to simplify future development of this sort of improvement.)
637
+ #
638
+ # The (right) one-sided formula is produced similarly using abscissae
639
+ # x, x+h, x+h/d, x+h/d**2, ..., x+h/d**6, x+h/d**7, x+h/d**7
640
+ # where `d` is the square root of `c`. (The left one-sided formula simply
641
+ # uses -h.) When the step size is reduced by factor `c = d**2`, we have
642
+ # abscissae:
643
+ # x, x+h/d**2, x+h/d**3..., x+h/d**8, x+h/d**9, x+h/d**9
644
+ # `d` is chosen as the square root of `c` so that the rate of the step-size
645
+ # reduction is the same per iteration as in the central difference case.
646
+ # Note that because the central difference formulas are inherently of even
647
+ # order, for simplicity, we use only even-order formulas for one-sided
648
+ # differences, too.
649
+
650
+ # It's possible for the user to specify `fac` in, say, double precision but
651
+ # `x` and `args` in single precision. `fac` gets converted to single
652
+ # precision, but we should always use double precision for the intermediate
653
+ # calculations here to avoid additional error in the weights.
654
+ fac = float(work.fac)
655
+
656
+ # Note that if the user switches back to floating point precision with
657
+ # `x` and `args`, then `fac` will not necessarily equal the (lower
658
+ # precision) cached `_derivative_weights.fac`, and the weights will
659
+ # need to be recalculated. This could be fixed, but it's late, and of
660
+ # low consequence.
661
+
662
+ diff_state = work.diff_state
663
+ if fac != diff_state.fac:
664
+ diff_state.central = []
665
+ diff_state.right = []
666
+ diff_state.fac = fac
667
+
668
+ if len(diff_state.central) != 2*n + 1:
669
+ # Central difference weights. Consider refactoring this; it could
670
+ # probably be more compact.
671
+ # Note: Using NumPy here is OK; we convert to xp-type at the end
672
+ i = np.arange(-n, n + 1)
673
+ p = np.abs(i) - 1. # center point has power `p` -1, but sign `s` is 0
674
+ s = np.sign(i)
675
+
676
+ h = s / fac ** p
677
+ A = np.vander(h, increasing=True).T
678
+ b = np.zeros(2*n + 1)
679
+ b[1] = 1
680
+ weights = np.linalg.solve(A, b)
681
+
682
+ # Enforce identities to improve accuracy
683
+ weights[n] = 0
684
+ for i in range(n):
685
+ weights[-i-1] = -weights[i]
686
+
687
+ # Cache the weights. We only need to calculate them once unless
688
+ # the step factor changes.
689
+ diff_state.central = weights
690
+
691
+ # One-sided difference weights. The left one-sided weights (with
692
+ # negative steps) are simply the negative of the right one-sided
693
+ # weights, so no need to compute them separately.
694
+ i = np.arange(2*n + 1)
695
+ p = i - 1.
696
+ s = np.sign(i)
697
+
698
+ h = s / np.sqrt(fac) ** p
699
+ A = np.vander(h, increasing=True).T
700
+ b = np.zeros(2 * n + 1)
701
+ b[1] = 1
702
+ weights = np.linalg.solve(A, b)
703
+
704
+ diff_state.right = weights
705
+
706
+ return (xp.asarray(diff_state.central, dtype=work.dtype),
707
+ xp.asarray(diff_state.right, dtype=work.dtype))
708
+
709
+
710
+ def jacobian(f, x, *, tolerances=None, maxiter=10, order=8, initial_step=0.5,
711
+ step_factor=2.0, step_direction=0):
712
+ r"""Evaluate the Jacobian of a function numerically.
713
+
714
+ Parameters
715
+ ----------
716
+ f : callable
717
+ The function whose Jacobian is desired. The signature must be::
718
+
719
+ f(xi: ndarray) -> ndarray
720
+
721
+ where each element of ``xi`` is a finite real. If the function to be
722
+ differentiated accepts additional arguments, wrap it (e.g. using
723
+ `functools.partial` or ``lambda``) and pass the wrapped callable
724
+ into `jacobian`. `f` must not mutate the array ``xi``. See Notes
725
+ regarding vectorization and the dimensionality of the input and output.
726
+ x : float array_like
727
+ Points at which to evaluate the Jacobian. Must have at least one dimension.
728
+ See Notes regarding the dimensionality and vectorization.
729
+ tolerances : dictionary of floats, optional
730
+ Absolute and relative tolerances. Valid keys of the dictionary are:
731
+
732
+ - ``atol`` - absolute tolerance on the derivative
733
+ - ``rtol`` - relative tolerance on the derivative
734
+
735
+ Iteration will stop when ``res.error < atol + rtol * abs(res.df)``. The default
736
+ `atol` is the smallest normal number of the appropriate dtype, and
737
+ the default `rtol` is the square root of the precision of the
738
+ appropriate dtype.
739
+ maxiter : int, default: 10
740
+ The maximum number of iterations of the algorithm to perform. See
741
+ Notes.
742
+ order : int, default: 8
743
+ The (positive integer) order of the finite difference formula to be
744
+ used. Odd integers will be rounded up to the next even integer.
745
+ initial_step : float array_like, default: 0.5
746
+ The (absolute) initial step size for the finite difference derivative
747
+ approximation. Must be broadcastable with `x` and `step_direction`.
748
+ step_factor : float, default: 2.0
749
+ The factor by which the step size is *reduced* in each iteration; i.e.
750
+ the step size in iteration 1 is ``initial_step/step_factor``. If
751
+ ``step_factor < 1``, subsequent steps will be greater than the initial
752
+ step; this may be useful if steps smaller than some threshold are
753
+ undesirable (e.g. due to subtractive cancellation error).
754
+ step_direction : integer array_like
755
+ An array representing the direction of the finite difference steps (e.g.
756
+ for use when `x` lies near to the boundary of the domain of the function.)
757
+ Must be broadcastable with `x` and `initial_step`.
758
+ Where 0 (default), central differences are used; where negative (e.g.
759
+ -1), steps are non-positive; and where positive (e.g. 1), all steps are
760
+ non-negative.
761
+
762
+ Returns
763
+ -------
764
+ res : _RichResult
765
+ An object similar to an instance of `scipy.optimize.OptimizeResult` with the
766
+ following attributes. The descriptions are written as though the values will
767
+ be scalars; however, if `f` returns an array, the outputs will be
768
+ arrays of the same shape.
769
+
770
+ success : bool array
771
+ ``True`` where the algorithm terminated successfully (status ``0``);
772
+ ``False`` otherwise.
773
+ status : int array
774
+ An integer representing the exit status of the algorithm.
775
+
776
+ - ``0`` : The algorithm converged to the specified tolerances.
777
+ - ``-1`` : The error estimate increased, so iteration was terminated.
778
+ - ``-2`` : The maximum number of iterations was reached.
779
+ - ``-3`` : A non-finite value was encountered.
780
+
781
+ df : float array
782
+ The Jacobian of `f` at `x`, if the algorithm terminated
783
+ successfully.
784
+ error : float array
785
+ An estimate of the error: the magnitude of the difference between
786
+ the current estimate of the Jacobian and the estimate in the
787
+ previous iteration.
788
+ nit : int array
789
+ The number of iterations of the algorithm that were performed.
790
+ nfev : int array
791
+ The number of points at which `f` was evaluated.
792
+
793
+ Each element of an attribute is associated with the corresponding
794
+ element of `df`. For instance, element ``i`` of `nfev` is the
795
+ number of points at which `f` was evaluated for the sake of
796
+ computing element ``i`` of `df`.
797
+
798
+ See Also
799
+ --------
800
+ derivative, hessian
801
+
802
+ Notes
803
+ -----
804
+ Suppose we wish to evaluate the Jacobian of a function
805
+ :math:`f: \mathbf{R}^m \rightarrow \mathbf{R}^n`. Assign to variables
806
+ ``m`` and ``n`` the positive integer values of :math:`m` and :math:`n`,
807
+ respectively, and let ``...`` represent an arbitrary tuple of integers.
808
+ If we wish to evaluate the Jacobian at a single point, then:
809
+
810
+ - argument `x` must be an array of shape ``(m,)``
811
+ - argument `f` must be vectorized to accept an array of shape ``(m, ...)``.
812
+ The first axis represents the :math:`m` inputs of :math:`f`; the remainder
813
+ are for evaluating the function at multiple points in a single call.
814
+ - argument `f` must return an array of shape ``(n, ...)``. The first
815
+ axis represents the :math:`n` outputs of :math:`f`; the remainder
816
+ are for the result of evaluating the function at multiple points.
817
+ - attribute ``df`` of the result object will be an array of shape ``(n, m)``,
818
+ the Jacobian.
819
+
820
+ This function is also vectorized in the sense that the Jacobian can be
821
+ evaluated at ``k`` points in a single call. In this case, `x` would be an
822
+ array of shape ``(m, k)``, `f` would accept an array of shape
823
+ ``(m, k, ...)`` and return an array of shape ``(n, k, ...)``, and the ``df``
824
+ attribute of the result would have shape ``(n, m, k)``.
825
+
826
+ Suppose the desired callable ``f_not_vectorized`` is not vectorized; it can
827
+ only accept an array of shape ``(m,)``. A simple solution to satisfy the required
828
+ interface is to wrap ``f_not_vectorized`` as follows::
829
+
830
+ def f(x):
831
+ return np.apply_along_axis(f_not_vectorized, axis=0, arr=x)
832
+
833
+ Alternatively, suppose the desired callable ``f_vec_q`` is vectorized, but
834
+ only for 2-D arrays of shape ``(m, q)``. To satisfy the required interface,
835
+ consider::
836
+
837
+ def f(x):
838
+ m, batch = x.shape[0], x.shape[1:] # x.shape is (m, ...)
839
+ x = np.reshape(x, (m, -1)) # `-1` is short for q = prod(batch)
840
+ res = f_vec_q(x) # pass shape (m, q) to function
841
+ n = res.shape[0]
842
+ return np.reshape(res, (n,) + batch) # return shape (n, ...)
843
+
844
+ Then pass the wrapped callable ``f`` as the first argument of `jacobian`.
845
+
846
+ References
847
+ ----------
848
+ .. [1] Jacobian matrix and determinant, *Wikipedia*,
849
+ https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant
850
+
851
+ Examples
852
+ --------
853
+ The Rosenbrock function maps from :math:`\mathbf{R}^m \rightarrow \mathbf{R}`;
854
+ the SciPy implementation `scipy.optimize.rosen` is vectorized to accept an
855
+ array of shape ``(m, p)`` and return an array of shape ``p``. Suppose we wish
856
+ to evaluate the Jacobian (AKA the gradient because the function returns a scalar)
857
+ at ``[0.5, 0.5, 0.5]``.
858
+
859
+ >>> import numpy as np
860
+ >>> from scipy.differentiate import jacobian
861
+ >>> from scipy.optimize import rosen, rosen_der
862
+ >>> m = 3
863
+ >>> x = np.full(m, 0.5)
864
+ >>> res = jacobian(rosen, x)
865
+ >>> ref = rosen_der(x) # reference value of the gradient
866
+ >>> res.df, ref
867
+ (array([-51., -1., 50.]), array([-51., -1., 50.]))
868
+
869
+ As an example of a function with multiple outputs, consider Example 4
870
+ from [1]_.
871
+
872
+ >>> def f(x):
873
+ ... x1, x2, x3 = x
874
+ ... return [x1, 5*x3, 4*x2**2 - 2*x3, x3*np.sin(x1)]
875
+
876
+ The true Jacobian is given by:
877
+
878
+ >>> def df(x):
879
+ ... x1, x2, x3 = x
880
+ ... one = np.ones_like(x1)
881
+ ... return [[one, 0*one, 0*one],
882
+ ... [0*one, 0*one, 5*one],
883
+ ... [0*one, 8*x2, -2*one],
884
+ ... [x3*np.cos(x1), 0*one, np.sin(x1)]]
885
+
886
+ Evaluate the Jacobian at an arbitrary point.
887
+
888
+ >>> rng = np.random.default_rng(389252938452)
889
+ >>> x = rng.random(size=3)
890
+ >>> res = jacobian(f, x)
891
+ >>> ref = df(x)
892
+ >>> res.df.shape == (4, 3)
893
+ True
894
+ >>> np.allclose(res.df, ref)
895
+ True
896
+
897
+ Evaluate the Jacobian at 10 arbitrary points in a single call.
898
+
899
+ >>> x = rng.random(size=(3, 10))
900
+ >>> res = jacobian(f, x)
901
+ >>> ref = df(x)
902
+ >>> res.df.shape == (4, 3, 10)
903
+ True
904
+ >>> np.allclose(res.df, ref)
905
+ True
906
+
907
+ """
908
+ xp = array_namespace(x)
909
+ x = xp.asarray(x)
910
+ int_dtype = xp.isdtype(x.dtype, 'integral')
911
+ x0 = xp.asarray(x, dtype=xp.asarray(1.0).dtype) if int_dtype else x
912
+
913
+ if x0.ndim < 1:
914
+ message = "Argument `x` must be at least 1-D."
915
+ raise ValueError(message)
916
+
917
+ m = x0.shape[0]
918
+ i = xp.arange(m)
919
+
920
+ def wrapped(x):
921
+ p = () if x.ndim == x0.ndim else (x.shape[-1],) # number of abscissae
922
+
923
+ new_shape = (m, m) + x0.shape[1:] + p
924
+ xph = xp.expand_dims(x0, axis=1)
925
+ if x.ndim != x0.ndim:
926
+ xph = xp.expand_dims(xph, axis=-1)
927
+ xph = xp_copy(xp.broadcast_to(xph, new_shape), xp=xp)
928
+ xph[i, i] = x
929
+ return f(xph)
930
+
931
+ res = derivative(wrapped, x, tolerances=tolerances,
932
+ maxiter=maxiter, order=order, initial_step=initial_step,
933
+ step_factor=step_factor, preserve_shape=True,
934
+ step_direction=step_direction)
935
+
936
+ del res.x # the user knows `x`, and the way it gets broadcasted is meaningless here
937
+ return res
938
+
939
+
940
+ def hessian(f, x, *, tolerances=None, maxiter=10,
941
+ order=8, initial_step=0.5, step_factor=2.0):
942
+ r"""Evaluate the Hessian of a function numerically.
943
+
944
+ Parameters
945
+ ----------
946
+ f : callable
947
+ The function whose Hessian is desired. The signature must be::
948
+
949
+ f(xi: ndarray) -> ndarray
950
+
951
+ where each element of ``xi`` is a finite real. If the function to be
952
+ differentiated accepts additional arguments, wrap it (e.g. using
953
+ `functools.partial` or ``lambda``) and pass the wrapped callable
954
+ into `hessian`. `f` must not mutate the array ``xi``. See Notes
955
+ regarding vectorization and the dimensionality of the input and output.
956
+ x : float array_like
957
+ Points at which to evaluate the Hessian. Must have at least one dimension.
958
+ See Notes regarding the dimensionality and vectorization.
959
+ tolerances : dictionary of floats, optional
960
+ Absolute and relative tolerances. Valid keys of the dictionary are:
961
+
962
+ - ``atol`` - absolute tolerance on the derivative
963
+ - ``rtol`` - relative tolerance on the derivative
964
+
965
+ Iteration will stop when ``res.error < atol + rtol * abs(res.df)``. The default
966
+ `atol` is the smallest normal number of the appropriate dtype, and
967
+ the default `rtol` is the square root of the precision of the
968
+ appropriate dtype.
969
+ order : int, default: 8
970
+ The (positive integer) order of the finite difference formula to be
971
+ used. Odd integers will be rounded up to the next even integer.
972
+ initial_step : float, default: 0.5
973
+ The (absolute) initial step size for the finite difference derivative
974
+ approximation.
975
+ step_factor : float, default: 2.0
976
+ The factor by which the step size is *reduced* in each iteration; i.e.
977
+ the step size in iteration 1 is ``initial_step/step_factor``. If
978
+ ``step_factor < 1``, subsequent steps will be greater than the initial
979
+ step; this may be useful if steps smaller than some threshold are
980
+ undesirable (e.g. due to subtractive cancellation error).
981
+ maxiter : int, default: 10
982
+ The maximum number of iterations of the algorithm to perform. See
983
+ Notes.
984
+
985
+ Returns
986
+ -------
987
+ res : _RichResult
988
+ An object similar to an instance of `scipy.optimize.OptimizeResult` with the
989
+ following attributes. The descriptions are written as though the values will
990
+ be scalars; however, if `f` returns an array, the outputs will be
991
+ arrays of the same shape.
992
+
993
+ success : bool array
994
+ ``True`` where the algorithm terminated successfully (status ``0``);
995
+ ``False`` otherwise.
996
+ status : int array
997
+ An integer representing the exit status of the algorithm.
998
+
999
+ - ``0`` : The algorithm converged to the specified tolerances.
1000
+ - ``-1`` : The error estimate increased, so iteration was terminated.
1001
+ - ``-2`` : The maximum number of iterations was reached.
1002
+ - ``-3`` : A non-finite value was encountered.
1003
+
1004
+ ddf : float array
1005
+ The Hessian of `f` at `x`, if the algorithm terminated
1006
+ successfully.
1007
+ error : float array
1008
+ An estimate of the error: the magnitude of the difference between
1009
+ the current estimate of the Hessian and the estimate in the
1010
+ previous iteration.
1011
+ nfev : int array
1012
+ The number of points at which `f` was evaluated.
1013
+
1014
+ Each element of an attribute is associated with the corresponding
1015
+ element of `ddf`. For instance, element ``[i, j]`` of `nfev` is the
1016
+ number of points at which `f` was evaluated for the sake of
1017
+ computing element ``[i, j]`` of `ddf`.
1018
+
1019
+ See Also
1020
+ --------
1021
+ derivative, jacobian
1022
+
1023
+ Notes
1024
+ -----
1025
+ Suppose we wish to evaluate the Hessian of a function
1026
+ :math:`f: \mathbf{R}^m \rightarrow \mathbf{R}`, and we assign to variable
1027
+ ``m`` the positive integer value of :math:`m`. If we wish to evaluate
1028
+ the Hessian at a single point, then:
1029
+
1030
+ - argument `x` must be an array of shape ``(m,)``
1031
+ - argument `f` must be vectorized to accept an array of shape
1032
+ ``(m, ...)``. The first axis represents the :math:`m` inputs of
1033
+ :math:`f`; the remaining axes indicated by ellipses are for evaluating
1034
+ the function at several abscissae in a single call.
1035
+ - argument `f` must return an array of shape ``(...)``.
1036
+ - attribute ``dff`` of the result object will be an array of shape ``(m, m)``,
1037
+ the Hessian.
1038
+
1039
+ This function is also vectorized in the sense that the Hessian can be
1040
+ evaluated at ``k`` points in a single call. In this case, `x` would be an
1041
+ array of shape ``(m, k)``, `f` would accept an array of shape
1042
+ ``(m, ...)`` and return an array of shape ``(...)``, and the ``ddf``
1043
+ attribute of the result would have shape ``(m, m, k)``. Note that the
1044
+ axis associated with the ``k`` points is included within the axes
1045
+ denoted by ``(...)``.
1046
+
1047
+ Currently, `hessian` is implemented by nesting calls to `jacobian`.
1048
+ All options passed to `hessian` are used for both the inner and outer
1049
+ calls with one exception: the `rtol` used in the inner `jacobian` call
1050
+ is tightened by a factor of 100 with the expectation that the inner
1051
+ error can be ignored. A consequence is that `rtol` should not be set
1052
+ less than 100 times the precision of the dtype of `x`; a warning is
1053
+ emitted otherwise.
1054
+
1055
+ References
1056
+ ----------
1057
+ .. [1] Hessian matrix, *Wikipedia*,
1058
+ https://en.wikipedia.org/wiki/Hessian_matrix
1059
+
1060
+ Examples
1061
+ --------
1062
+ The Rosenbrock function maps from :math:`\mathbf{R}^m \rightarrow \mathbf{R}`;
1063
+ the SciPy implementation `scipy.optimize.rosen` is vectorized to accept an
1064
+ array of shape ``(m, ...)`` and return an array of shape ``...``. Suppose we
1065
+ wish to evaluate the Hessian at ``[0.5, 0.5, 0.5]``.
1066
+
1067
+ >>> import numpy as np
1068
+ >>> from scipy.differentiate import hessian
1069
+ >>> from scipy.optimize import rosen, rosen_hess
1070
+ >>> m = 3
1071
+ >>> x = np.full(m, 0.5)
1072
+ >>> res = hessian(rosen, x)
1073
+ >>> ref = rosen_hess(x) # reference value of the Hessian
1074
+ >>> np.allclose(res.ddf, ref)
1075
+ True
1076
+
1077
+ `hessian` is vectorized to evaluate the Hessian at multiple points
1078
+ in a single call.
1079
+
1080
+ >>> rng = np.random.default_rng(4589245925010)
1081
+ >>> x = rng.random((m, 10))
1082
+ >>> res = hessian(rosen, x)
1083
+ >>> ref = [rosen_hess(xi) for xi in x.T]
1084
+ >>> ref = np.moveaxis(ref, 0, -1)
1085
+ >>> np.allclose(res.ddf, ref)
1086
+ True
1087
+
1088
+ """
1089
+ # todo:
1090
+ # - add ability to vectorize over additional parameters (*args?)
1091
+ # - error estimate stack with inner jacobian (or use legit 2D stencil)
1092
+
1093
+ kwargs = dict(maxiter=maxiter, order=order, initial_step=initial_step,
1094
+ step_factor=step_factor)
1095
+ tolerances = {} if tolerances is None else tolerances
1096
+ atol = tolerances.get('atol', None)
1097
+ rtol = tolerances.get('rtol', None)
1098
+
1099
+ xp = array_namespace(x)
1100
+ x = xp.asarray(x)
1101
+ dtype = x.dtype if not xp.isdtype(x.dtype, 'integral') else xp.asarray(1.).dtype
1102
+ finfo = xp.finfo(dtype)
1103
+ rtol = finfo.eps**0.5 if rtol is None else rtol # keep same as `derivative`
1104
+
1105
+ # tighten the inner tolerance to make the inner error negligible
1106
+ rtol_min = finfo.eps * 100
1107
+ message = (f"The specified `{rtol=}`, but error estimates are likely to be "
1108
+ f"unreliable when `rtol < {rtol_min}`.")
1109
+ if 0 < rtol < rtol_min: # rtol <= 0 is an error
1110
+ warnings.warn(message, RuntimeWarning, stacklevel=2)
1111
+ rtol = rtol_min
1112
+
1113
+ def df(x):
1114
+ tolerances = dict(rtol=rtol/100, atol=atol)
1115
+ temp = jacobian(f, x, tolerances=tolerances, **kwargs)
1116
+ nfev.append(temp.nfev if len(nfev) == 0 else temp.nfev.sum(axis=-1))
1117
+ return temp.df
1118
+
1119
+ nfev = [] # track inner function evaluations
1120
+ res = jacobian(df, x, tolerances=tolerances, **kwargs) # jacobian of jacobian
1121
+
1122
+ nfev = xp.cumulative_sum(xp.stack(nfev), axis=0)
1123
+ res_nit = xp.astype(res.nit[xp.newaxis, ...], xp.int64) # appease torch
1124
+ res.nfev = xp_take_along_axis(nfev, res_nit, axis=0)[0]
1125
+ res.ddf = res.df
1126
+ del res.df # this is renamed to ddf
1127
+ del res.nit # this is only the outer-jacobian nit
1128
+
1129
+ return res
infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/__pycache__/test_differentiate.cpython-310.pyc ADDED
Binary file (27.3 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/differentiate/tests/test_differentiate.py ADDED
@@ -0,0 +1,695 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import pytest
3
+
4
+ import numpy as np
5
+
6
+ from scipy.conftest import array_api_compatible
7
+ import scipy._lib._elementwise_iterative_method as eim
8
+ from scipy._lib._array_api_no_0d import xp_assert_close, xp_assert_equal, xp_assert_less
9
+ from scipy._lib._array_api import is_numpy, is_torch, array_namespace
10
+
11
+ from scipy import stats, optimize, special
12
+ from scipy.differentiate import derivative, jacobian, hessian
13
+ from scipy.differentiate._differentiate import _EERRORINCREASE
14
+
15
+
16
+ pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")]
17
+
18
+ array_api_strict_skip_reason = 'Array API does not support fancy indexing assignment.'
19
+ jax_skip_reason = 'JAX arrays do not support item assignment.'
20
+
21
+
22
+ @pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason)
23
+ @pytest.mark.skip_xp_backends('jax.numpy',reason=jax_skip_reason)
24
+ class TestDerivative:
25
+
26
+ def f(self, x):
27
+ return special.ndtr(x)
28
+
29
+ @pytest.mark.parametrize('x', [0.6, np.linspace(-0.05, 1.05, 10)])
30
+ def test_basic(self, x, xp):
31
+ # Invert distribution CDF and compare against distribution `ppf`
32
+ default_dtype = xp.asarray(1.).dtype
33
+ res = derivative(self.f, xp.asarray(x, dtype=default_dtype))
34
+ ref = xp.asarray(stats.norm().pdf(x), dtype=default_dtype)
35
+ xp_assert_close(res.df, ref)
36
+ # This would be nice, but doesn't always work out. `error` is an
37
+ # estimate, not a bound.
38
+ if not is_torch(xp):
39
+ xp_assert_less(xp.abs(res.df - ref), res.error)
40
+
41
+ @pytest.mark.skip_xp_backends(np_only=True)
42
+ @pytest.mark.parametrize('case', stats._distr_params.distcont)
43
+ def test_accuracy(self, case):
44
+ distname, params = case
45
+ dist = getattr(stats, distname)(*params)
46
+ x = dist.median() + 0.1
47
+ res = derivative(dist.cdf, x)
48
+ ref = dist.pdf(x)
49
+ xp_assert_close(res.df, ref, atol=1e-10)
50
+
51
+ @pytest.mark.parametrize('order', [1, 6])
52
+ @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
53
+ def test_vectorization(self, order, shape, xp):
54
+ # Test for correct functionality, output shapes, and dtypes for various
55
+ # input shapes.
56
+ x = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
57
+ n = np.size(x)
58
+ state = {}
59
+
60
+ @np.vectorize
61
+ def _derivative_single(x):
62
+ return derivative(self.f, x, order=order)
63
+
64
+ def f(x, *args, **kwargs):
65
+ state['nit'] += 1
66
+ state['feval'] += 1 if (x.size == n or x.ndim <=1) else x.shape[-1]
67
+ return self.f(x, *args, **kwargs)
68
+
69
+ state['nit'] = -1
70
+ state['feval'] = 0
71
+
72
+ res = derivative(f, xp.asarray(x, dtype=xp.float64), order=order)
73
+ refs = _derivative_single(x).ravel()
74
+
75
+ ref_x = [ref.x for ref in refs]
76
+ xp_assert_close(xp.reshape(res.x, (-1,)), xp.asarray(ref_x))
77
+
78
+ ref_df = [ref.df for ref in refs]
79
+ xp_assert_close(xp.reshape(res.df, (-1,)), xp.asarray(ref_df))
80
+
81
+ ref_error = [ref.error for ref in refs]
82
+ xp_assert_close(xp.reshape(res.error, (-1,)), xp.asarray(ref_error),
83
+ atol=1e-12)
84
+
85
+ ref_success = [bool(ref.success) for ref in refs]
86
+ xp_assert_equal(xp.reshape(res.success, (-1,)), xp.asarray(ref_success))
87
+
88
+ ref_flag = [np.int32(ref.status) for ref in refs]
89
+ xp_assert_equal(xp.reshape(res.status, (-1,)), xp.asarray(ref_flag))
90
+
91
+ ref_nfev = [np.int32(ref.nfev) for ref in refs]
92
+ xp_assert_equal(xp.reshape(res.nfev, (-1,)), xp.asarray(ref_nfev))
93
+ if is_numpy(xp): # can't expect other backends to be exactly the same
94
+ assert xp.max(res.nfev) == state['feval']
95
+
96
+ ref_nit = [np.int32(ref.nit) for ref in refs]
97
+ xp_assert_equal(xp.reshape(res.nit, (-1,)), xp.asarray(ref_nit))
98
+ if is_numpy(xp): # can't expect other backends to be exactly the same
99
+ assert xp.max(res.nit) == state['nit']
100
+
101
+ def test_flags(self, xp):
102
+ # Test cases that should produce different status flags; show that all
103
+ # can be produced simultaneously.
104
+ rng = np.random.default_rng(5651219684984213)
105
+ def f(xs, js):
106
+ f.nit += 1
107
+ funcs = [lambda x: x - 2.5, # converges
108
+ lambda x: xp.exp(x)*rng.random(), # error increases
109
+ lambda x: xp.exp(x), # reaches maxiter due to order=2
110
+ lambda x: xp.full_like(x, xp.nan)] # stops due to NaN
111
+ res = [funcs[int(j)](x) for x, j in zip(xs, xp.reshape(js, (-1,)))]
112
+ return xp.stack(res)
113
+ f.nit = 0
114
+
115
+ args = (xp.arange(4, dtype=xp.int64),)
116
+ res = derivative(f, xp.ones(4, dtype=xp.float64),
117
+ tolerances=dict(rtol=1e-14),
118
+ order=2, args=args)
119
+
120
+ ref_flags = xp.asarray([eim._ECONVERGED,
121
+ _EERRORINCREASE,
122
+ eim._ECONVERR,
123
+ eim._EVALUEERR], dtype=xp.int32)
124
+ xp_assert_equal(res.status, ref_flags)
125
+
126
+ def test_flags_preserve_shape(self, xp):
127
+ # Same test as above but using `preserve_shape` option to simplify.
128
+ rng = np.random.default_rng(5651219684984213)
129
+ def f(x):
130
+ out = [x - 2.5, # converges
131
+ xp.exp(x)*rng.random(), # error increases
132
+ xp.exp(x), # reaches maxiter due to order=2
133
+ xp.full_like(x, xp.nan)] # stops due to NaN
134
+ return xp.stack(out)
135
+
136
+ res = derivative(f, xp.asarray(1, dtype=xp.float64),
137
+ tolerances=dict(rtol=1e-14),
138
+ order=2, preserve_shape=True)
139
+
140
+ ref_flags = xp.asarray([eim._ECONVERGED,
141
+ _EERRORINCREASE,
142
+ eim._ECONVERR,
143
+ eim._EVALUEERR], dtype=xp.int32)
144
+ xp_assert_equal(res.status, ref_flags)
145
+
146
+ def test_preserve_shape(self, xp):
147
+ # Test `preserve_shape` option
148
+ def f(x):
149
+ out = [x, xp.sin(3*x), x+xp.sin(10*x), xp.sin(20*x)*(x-1)**2]
150
+ return xp.stack(out)
151
+
152
+ x = xp.asarray(0.)
153
+ ref = xp.asarray([xp.asarray(1), 3*xp.cos(3*x), 1+10*xp.cos(10*x),
154
+ 20*xp.cos(20*x)*(x-1)**2 + 2*xp.sin(20*x)*(x-1)])
155
+ res = derivative(f, x, preserve_shape=True)
156
+ xp_assert_close(res.df, ref)
157
+
158
+ def test_convergence(self, xp):
159
+ # Test that the convergence tolerances behave as expected
160
+ x = xp.asarray(1., dtype=xp.float64)
161
+ f = special.ndtr
162
+ ref = float(stats.norm.pdf(1.))
163
+ tolerances0 = dict(atol=0, rtol=0)
164
+
165
+ tolerances = tolerances0.copy()
166
+ tolerances['atol'] = 1e-3
167
+ res1 = derivative(f, x, tolerances=tolerances, order=4)
168
+ assert abs(res1.df - ref) < 1e-3
169
+ tolerances['atol'] = 1e-6
170
+ res2 = derivative(f, x, tolerances=tolerances, order=4)
171
+ assert abs(res2.df - ref) < 1e-6
172
+ assert abs(res2.df - ref) < abs(res1.df - ref)
173
+
174
+ tolerances = tolerances0.copy()
175
+ tolerances['rtol'] = 1e-3
176
+ res1 = derivative(f, x, tolerances=tolerances, order=4)
177
+ assert abs(res1.df - ref) < 1e-3 * ref
178
+ tolerances['rtol'] = 1e-6
179
+ res2 = derivative(f, x, tolerances=tolerances, order=4)
180
+ assert abs(res2.df - ref) < 1e-6 * ref
181
+ assert abs(res2.df - ref) < abs(res1.df - ref)
182
+
183
+ def test_step_parameters(self, xp):
184
+ # Test that step factors have the expected effect on accuracy
185
+ x = xp.asarray(1., dtype=xp.float64)
186
+ f = special.ndtr
187
+ ref = float(stats.norm.pdf(1.))
188
+
189
+ res1 = derivative(f, x, initial_step=0.5, maxiter=1)
190
+ res2 = derivative(f, x, initial_step=0.05, maxiter=1)
191
+ assert abs(res2.df - ref) < abs(res1.df - ref)
192
+
193
+ res1 = derivative(f, x, step_factor=2, maxiter=1)
194
+ res2 = derivative(f, x, step_factor=20, maxiter=1)
195
+ assert abs(res2.df - ref) < abs(res1.df - ref)
196
+
197
+ # `step_factor` can be less than 1: `initial_step` is the minimum step
198
+ kwargs = dict(order=4, maxiter=1, step_direction=0)
199
+ res = derivative(f, x, initial_step=0.5, step_factor=0.5, **kwargs)
200
+ ref = derivative(f, x, initial_step=1, step_factor=2, **kwargs)
201
+ xp_assert_close(res.df, ref.df, rtol=5e-15)
202
+
203
+ # This is a similar test for one-sided difference
204
+ kwargs = dict(order=2, maxiter=1, step_direction=1)
205
+ res = derivative(f, x, initial_step=1, step_factor=2, **kwargs)
206
+ ref = derivative(f, x, initial_step=1/np.sqrt(2), step_factor=0.5, **kwargs)
207
+ xp_assert_close(res.df, ref.df, rtol=5e-15)
208
+
209
+ kwargs['step_direction'] = -1
210
+ res = derivative(f, x, initial_step=1, step_factor=2, **kwargs)
211
+ ref = derivative(f, x, initial_step=1/np.sqrt(2), step_factor=0.5, **kwargs)
212
+ xp_assert_close(res.df, ref.df, rtol=5e-15)
213
+
214
+ def test_step_direction(self, xp):
215
+ # test that `step_direction` works as expected
216
+ def f(x):
217
+ y = xp.exp(x)
218
+ y[(x < 0) + (x > 2)] = xp.nan
219
+ return y
220
+
221
+ x = xp.linspace(0, 2, 10)
222
+ step_direction = xp.zeros_like(x)
223
+ step_direction[x < 0.6], step_direction[x > 1.4] = 1, -1
224
+ res = derivative(f, x, step_direction=step_direction)
225
+ xp_assert_close(res.df, xp.exp(x))
226
+ assert xp.all(res.success)
227
+
228
+ def test_vectorized_step_direction_args(self, xp):
229
+ # test that `step_direction` and `args` are vectorized properly
230
+ def f(x, p):
231
+ return x ** p
232
+
233
+ def df(x, p):
234
+ return p * x ** (p - 1)
235
+
236
+ x = xp.reshape(xp.asarray([1, 2, 3, 4]), (-1, 1, 1))
237
+ hdir = xp.reshape(xp.asarray([-1, 0, 1]), (1, -1, 1))
238
+ p = xp.reshape(xp.asarray([2, 3]), (1, 1, -1))
239
+ res = derivative(f, x, step_direction=hdir, args=(p,))
240
+ ref = xp.broadcast_to(df(x, p), res.df.shape)
241
+ ref = xp.asarray(ref, dtype=xp.asarray(1.).dtype)
242
+ xp_assert_close(res.df, ref)
243
+
244
+ def test_initial_step(self, xp):
245
+ # Test that `initial_step` works as expected and is vectorized
246
+ def f(x):
247
+ return xp.exp(x)
248
+
249
+ x = xp.asarray(0., dtype=xp.float64)
250
+ step_direction = xp.asarray([-1, 0, 1])
251
+ h0 = xp.reshape(xp.logspace(-3, 0, 10), (-1, 1))
252
+ res = derivative(f, x, initial_step=h0, order=2, maxiter=1,
253
+ step_direction=step_direction)
254
+ err = xp.abs(res.df - f(x))
255
+
256
+ # error should be smaller for smaller step sizes
257
+ assert xp.all(err[:-1, ...] < err[1:, ...])
258
+
259
+ # results of vectorized call should match results with
260
+ # initial_step taken one at a time
261
+ for i in range(h0.shape[0]):
262
+ ref = derivative(f, x, initial_step=h0[i, 0], order=2, maxiter=1,
263
+ step_direction=step_direction)
264
+ xp_assert_close(res.df[i, :], ref.df, rtol=1e-14)
265
+
266
+ def test_maxiter_callback(self, xp):
267
+ # Test behavior of `maxiter` parameter and `callback` interface
268
+ x = xp.asarray(0.612814, dtype=xp.float64)
269
+ maxiter = 3
270
+
271
+ def f(x):
272
+ res = special.ndtr(x)
273
+ return res
274
+
275
+ default_order = 8
276
+ res = derivative(f, x, maxiter=maxiter, tolerances=dict(rtol=1e-15))
277
+ assert not xp.any(res.success)
278
+ assert xp.all(res.nfev == default_order + 1 + (maxiter - 1)*2)
279
+ assert xp.all(res.nit == maxiter)
280
+
281
+ def callback(res):
282
+ callback.iter += 1
283
+ callback.res = res
284
+ assert hasattr(res, 'x')
285
+ assert float(res.df) not in callback.dfs
286
+ callback.dfs.add(float(res.df))
287
+ assert res.status == eim._EINPROGRESS
288
+ if callback.iter == maxiter:
289
+ raise StopIteration
290
+ callback.iter = -1 # callback called once before first iteration
291
+ callback.res = None
292
+ callback.dfs = set()
293
+
294
+ res2 = derivative(f, x, callback=callback, tolerances=dict(rtol=1e-15))
295
+ # terminating with callback is identical to terminating due to maxiter
296
+ # (except for `status`)
297
+ for key in res.keys():
298
+ if key == 'status':
299
+ assert res[key] == eim._ECONVERR
300
+ assert res2[key] == eim._ECALLBACK
301
+ else:
302
+ assert res2[key] == callback.res[key] == res[key]
303
+
304
+ @pytest.mark.parametrize("hdir", (-1, 0, 1))
305
+ @pytest.mark.parametrize("x", (0.65, [0.65, 0.7]))
306
+ @pytest.mark.parametrize("dtype", ('float16', 'float32', 'float64'))
307
+ def test_dtype(self, hdir, x, dtype, xp):
308
+ if dtype == 'float16' and not is_numpy(xp):
309
+ pytest.skip('float16 not tested for alternative backends')
310
+
311
+ # Test that dtypes are preserved
312
+ dtype = getattr(xp, dtype)
313
+ x = xp.asarray(x, dtype=dtype)
314
+
315
+ def f(x):
316
+ assert x.dtype == dtype
317
+ return xp.exp(x)
318
+
319
+ def callback(res):
320
+ assert res.x.dtype == dtype
321
+ assert res.df.dtype == dtype
322
+ assert res.error.dtype == dtype
323
+
324
+ res = derivative(f, x, order=4, step_direction=hdir, callback=callback)
325
+ assert res.x.dtype == dtype
326
+ assert res.df.dtype == dtype
327
+ assert res.error.dtype == dtype
328
+ eps = xp.finfo(dtype).eps
329
+ # not sure why torch is less accurate here; might be worth investigating
330
+ rtol = eps**0.5 * 50 if is_torch(xp) else eps**0.5
331
+ xp_assert_close(res.df, xp.exp(res.x), rtol=rtol)
332
+
333
+ def test_input_validation(self, xp):
334
+ # Test input validation for appropriate error messages
335
+ one = xp.asarray(1)
336
+
337
+ message = '`f` must be callable.'
338
+ with pytest.raises(ValueError, match=message):
339
+ derivative(None, one)
340
+
341
+ message = 'Abscissae and function output must be real numbers.'
342
+ with pytest.raises(ValueError, match=message):
343
+ derivative(lambda x: x, xp.asarray(-4+1j))
344
+
345
+ message = "When `preserve_shape=False`, the shape of the array..."
346
+ with pytest.raises(ValueError, match=message):
347
+ derivative(lambda x: [1, 2, 3], xp.asarray([-2, -3]))
348
+
349
+ message = 'Tolerances and step parameters must be non-negative...'
350
+ with pytest.raises(ValueError, match=message):
351
+ derivative(lambda x: x, one, tolerances=dict(atol=-1))
352
+ with pytest.raises(ValueError, match=message):
353
+ derivative(lambda x: x, one, tolerances=dict(rtol='ekki'))
354
+ with pytest.raises(ValueError, match=message):
355
+ derivative(lambda x: x, one, step_factor=object())
356
+
357
+ message = '`maxiter` must be a positive integer.'
358
+ with pytest.raises(ValueError, match=message):
359
+ derivative(lambda x: x, one, maxiter=1.5)
360
+ with pytest.raises(ValueError, match=message):
361
+ derivative(lambda x: x, one, maxiter=0)
362
+
363
+ message = '`order` must be a positive integer'
364
+ with pytest.raises(ValueError, match=message):
365
+ derivative(lambda x: x, one, order=1.5)
366
+ with pytest.raises(ValueError, match=message):
367
+ derivative(lambda x: x, one, order=0)
368
+
369
+ message = '`preserve_shape` must be True or False.'
370
+ with pytest.raises(ValueError, match=message):
371
+ derivative(lambda x: x, one, preserve_shape='herring')
372
+
373
+ message = '`callback` must be callable.'
374
+ with pytest.raises(ValueError, match=message):
375
+ derivative(lambda x: x, one, callback='shrubbery')
376
+
377
+ def test_special_cases(self, xp):
378
+ # Test edge cases and other special cases
379
+
380
+ # Test that integers are not passed to `f`
381
+ # (otherwise this would overflow)
382
+ def f(x):
383
+ xp_test = array_namespace(x) # needs `isdtype`
384
+ assert xp_test.isdtype(x.dtype, 'real floating')
385
+ return x ** 99 - 1
386
+
387
+ if not is_torch(xp): # torch defaults to float32
388
+ res = derivative(f, xp.asarray(7), tolerances=dict(rtol=1e-10))
389
+ assert res.success
390
+ xp_assert_close(res.df, xp.asarray(99*7.**98))
391
+
392
+ # Test invalid step size and direction
393
+ res = derivative(xp.exp, xp.asarray(1), step_direction=xp.nan)
394
+ xp_assert_equal(res.df, xp.asarray(xp.nan))
395
+ xp_assert_equal(res.status, xp.asarray(-3, dtype=xp.int32))
396
+
397
+ res = derivative(xp.exp, xp.asarray(1), initial_step=0)
398
+ xp_assert_equal(res.df, xp.asarray(xp.nan))
399
+ xp_assert_equal(res.status, xp.asarray(-3, dtype=xp.int32))
400
+
401
+ # Test that if success is achieved in the correct number
402
+ # of iterations if function is a polynomial. Ideally, all polynomials
403
+ # of order 0-2 would get exact result with 0 refinement iterations,
404
+ # all polynomials of order 3-4 would be differentiated exactly after
405
+ # 1 iteration, etc. However, it seems that `derivative` needs an
406
+ # extra iteration to detect convergence based on the error estimate.
407
+
408
+ for n in range(6):
409
+ x = xp.asarray(1.5, dtype=xp.float64)
410
+ def f(x):
411
+ return 2*x**n
412
+
413
+ ref = 2*n*x**(n-1)
414
+
415
+ res = derivative(f, x, maxiter=1, order=max(1, n))
416
+ xp_assert_close(res.df, ref, rtol=1e-15)
417
+ xp_assert_equal(res.error, xp.asarray(xp.nan, dtype=xp.float64))
418
+
419
+ res = derivative(f, x, order=max(1, n))
420
+ assert res.success
421
+ assert res.nit == 2
422
+ xp_assert_close(res.df, ref, rtol=1e-15)
423
+
424
+ # Test scalar `args` (not in tuple)
425
+ def f(x, c):
426
+ return c*x - 1
427
+
428
+ res = derivative(f, xp.asarray(2), args=xp.asarray(3))
429
+ xp_assert_close(res.df, xp.asarray(3.))
430
+
431
+ # no need to run a test on multiple backends if it's xfailed
432
+ @pytest.mark.skip_xp_backends(np_only=True)
433
+ @pytest.mark.xfail
434
+ @pytest.mark.parametrize("case", ( # function, evaluation point
435
+ (lambda x: (x - 1) ** 3, 1),
436
+ (lambda x: np.where(x > 1, (x - 1) ** 5, (x - 1) ** 3), 1)
437
+ ))
438
+ def test_saddle_gh18811(self, case):
439
+ # With default settings, `derivative` will not always converge when
440
+ # the true derivative is exactly zero. This tests that specifying a
441
+ # (tight) `atol` alleviates the problem. See discussion in gh-18811.
442
+ atol = 1e-16
443
+ res = derivative(*case, step_direction=[-1, 0, 1], atol=atol)
444
+ assert np.all(res.success)
445
+ xp_assert_close(res.df, 0, atol=atol)
446
+
447
+
448
+ class JacobianHessianTest:
449
+ def test_iv(self, xp):
450
+ jh_func = self.jh_func.__func__
451
+
452
+ # Test input validation
453
+ message = "Argument `x` must be at least 1-D."
454
+ with pytest.raises(ValueError, match=message):
455
+ jh_func(xp.sin, 1, tolerances=dict(atol=-1))
456
+
457
+ # Confirm that other parameters are being passed to `derivative`,
458
+ # which raises an appropriate error message.
459
+ x = xp.ones(3)
460
+ func = optimize.rosen
461
+ message = 'Tolerances and step parameters must be non-negative scalars.'
462
+ with pytest.raises(ValueError, match=message):
463
+ jh_func(func, x, tolerances=dict(atol=-1))
464
+ with pytest.raises(ValueError, match=message):
465
+ jh_func(func, x, tolerances=dict(rtol=-1))
466
+ with pytest.raises(ValueError, match=message):
467
+ jh_func(func, x, step_factor=-1)
468
+
469
+ message = '`order` must be a positive integer.'
470
+ with pytest.raises(ValueError, match=message):
471
+ jh_func(func, x, order=-1)
472
+
473
+ message = '`maxiter` must be a positive integer.'
474
+ with pytest.raises(ValueError, match=message):
475
+ jh_func(func, x, maxiter=-1)
476
+
477
+
478
+ @pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason)
479
+ @pytest.mark.skip_xp_backends('jax.numpy',reason=jax_skip_reason)
480
+ class TestJacobian(JacobianHessianTest):
481
+ jh_func = jacobian
482
+
483
+ # Example functions and Jacobians from Wikipedia:
484
+ # https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant#Examples
485
+
486
+ def f1(z, xp):
487
+ x, y = z
488
+ return xp.stack([x ** 2 * y, 5 * x + xp.sin(y)])
489
+
490
+ def df1(z):
491
+ x, y = z
492
+ return [[2 * x * y, x ** 2], [np.full_like(x, 5), np.cos(y)]]
493
+
494
+ f1.mn = 2, 2 # type: ignore[attr-defined]
495
+ f1.ref = df1 # type: ignore[attr-defined]
496
+
497
+ def f2(z, xp):
498
+ r, phi = z
499
+ return xp.stack([r * xp.cos(phi), r * xp.sin(phi)])
500
+
501
+ def df2(z):
502
+ r, phi = z
503
+ return [[np.cos(phi), -r * np.sin(phi)],
504
+ [np.sin(phi), r * np.cos(phi)]]
505
+
506
+ f2.mn = 2, 2 # type: ignore[attr-defined]
507
+ f2.ref = df2 # type: ignore[attr-defined]
508
+
509
+ def f3(z, xp):
510
+ r, phi, th = z
511
+ return xp.stack([r * xp.sin(phi) * xp.cos(th), r * xp.sin(phi) * xp.sin(th),
512
+ r * xp.cos(phi)])
513
+
514
+ def df3(z):
515
+ r, phi, th = z
516
+ return [[np.sin(phi) * np.cos(th), r * np.cos(phi) * np.cos(th),
517
+ -r * np.sin(phi) * np.sin(th)],
518
+ [np.sin(phi) * np.sin(th), r * np.cos(phi) * np.sin(th),
519
+ r * np.sin(phi) * np.cos(th)],
520
+ [np.cos(phi), -r * np.sin(phi), np.zeros_like(r)]]
521
+
522
+ f3.mn = 3, 3 # type: ignore[attr-defined]
523
+ f3.ref = df3 # type: ignore[attr-defined]
524
+
525
+ def f4(x, xp):
526
+ x1, x2, x3 = x
527
+ return xp.stack([x1, 5 * x3, 4 * x2 ** 2 - 2 * x3, x3 * xp.sin(x1)])
528
+
529
+ def df4(x):
530
+ x1, x2, x3 = x
531
+ one = np.ones_like(x1)
532
+ return [[one, 0 * one, 0 * one],
533
+ [0 * one, 0 * one, 5 * one],
534
+ [0 * one, 8 * x2, -2 * one],
535
+ [x3 * np.cos(x1), 0 * one, np.sin(x1)]]
536
+
537
+ f4.mn = 3, 4 # type: ignore[attr-defined]
538
+ f4.ref = df4 # type: ignore[attr-defined]
539
+
540
+ def f5(x, xp):
541
+ x1, x2, x3 = x
542
+ return xp.stack([5 * x2, 4 * x1 ** 2 - 2 * xp.sin(x2 * x3), x2 * x3])
543
+
544
+ def df5(x):
545
+ x1, x2, x3 = x
546
+ one = np.ones_like(x1)
547
+ return [[0 * one, 5 * one, 0 * one],
548
+ [8 * x1, -2 * x3 * np.cos(x2 * x3), -2 * x2 * np.cos(x2 * x3)],
549
+ [0 * one, x3, x2]]
550
+
551
+ f5.mn = 3, 3 # type: ignore[attr-defined]
552
+ f5.ref = df5 # type: ignore[attr-defined]
553
+
554
+ def rosen(x, _): return optimize.rosen(x)
555
+ rosen.mn = 5, 1 # type: ignore[attr-defined]
556
+ rosen.ref = optimize.rosen_der # type: ignore[attr-defined]
557
+
558
+ @pytest.mark.parametrize('dtype', ('float32', 'float64'))
559
+ @pytest.mark.parametrize('size', [(), (6,), (2, 3)])
560
+ @pytest.mark.parametrize('func', [f1, f2, f3, f4, f5, rosen])
561
+ def test_examples(self, dtype, size, func, xp):
562
+ atol = 1e-10 if dtype == 'float64' else 1.99e-3
563
+ dtype = getattr(xp, dtype)
564
+ rng = np.random.default_rng(458912319542)
565
+ m, n = func.mn
566
+ x = rng.random(size=(m,) + size)
567
+ res = jacobian(lambda x: func(x , xp), xp.asarray(x, dtype=dtype))
568
+ # convert list of arrays to single array before converting to xp array
569
+ ref = xp.asarray(np.asarray(func.ref(x)), dtype=dtype)
570
+ xp_assert_close(res.df, ref, atol=atol)
571
+
572
+ def test_attrs(self, xp):
573
+ # Test attributes of result object
574
+ z = xp.asarray([0.5, 0.25])
575
+
576
+ # case in which some elements of the Jacobian are harder
577
+ # to calculate than others
578
+ def df1(z):
579
+ x, y = z
580
+ return xp.stack([xp.cos(0.5*x) * xp.cos(y), xp.sin(2*x) * y**2])
581
+
582
+ def df1_0xy(x, y):
583
+ return xp.cos(0.5*x) * xp.cos(y)
584
+
585
+ def df1_1xy(x, y):
586
+ return xp.sin(2*x) * y**2
587
+
588
+ res = jacobian(df1, z, initial_step=10)
589
+ if is_numpy(xp):
590
+ assert len(np.unique(res.nit)) == 4
591
+ assert len(np.unique(res.nfev)) == 4
592
+
593
+ res00 = jacobian(lambda x: df1_0xy(x, z[1]), z[0:1], initial_step=10)
594
+ res01 = jacobian(lambda y: df1_0xy(z[0], y), z[1:2], initial_step=10)
595
+ res10 = jacobian(lambda x: df1_1xy(x, z[1]), z[0:1], initial_step=10)
596
+ res11 = jacobian(lambda y: df1_1xy(z[0], y), z[1:2], initial_step=10)
597
+ ref = optimize.OptimizeResult()
598
+ for attr in ['success', 'status', 'df', 'nit', 'nfev']:
599
+ ref_attr = xp.asarray([[getattr(res00, attr), getattr(res01, attr)],
600
+ [getattr(res10, attr), getattr(res11, attr)]])
601
+ ref[attr] = xp.squeeze(ref_attr)
602
+ rtol = 1.5e-5 if res[attr].dtype == xp.float32 else 1.5e-14
603
+ xp_assert_close(res[attr], ref[attr], rtol=rtol)
604
+
605
+ def test_step_direction_size(self, xp):
606
+ # Check that `step_direction` and `initial_step` can be used to ensure that
607
+ # the usable domain of a function is respected.
608
+ rng = np.random.default_rng(23892589425245)
609
+ b = rng.random(3)
610
+ eps = 1e-7 # torch needs wiggle room?
611
+
612
+ def f(x):
613
+ x[0, x[0] < b[0]] = xp.nan
614
+ x[0, x[0] > b[0] + 0.25] = xp.nan
615
+ x[1, x[1] > b[1]] = xp.nan
616
+ x[1, x[1] < b[1] - 0.1-eps] = xp.nan
617
+ return TestJacobian.f5(x, xp)
618
+
619
+ dir = [1, -1, 0]
620
+ h0 = [0.25, 0.1, 0.5]
621
+ atol = {'atol': 1e-8}
622
+ res = jacobian(f, xp.asarray(b, dtype=xp.float64), initial_step=h0,
623
+ step_direction=dir, tolerances=atol)
624
+ ref = xp.asarray(TestJacobian.df5(b), dtype=xp.float64)
625
+ xp_assert_close(res.df, ref, atol=1e-8)
626
+ assert xp.all(xp.isfinite(ref))
627
+
628
+
629
+ @pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason)
630
+ @pytest.mark.skip_xp_backends('jax.numpy',reason=jax_skip_reason)
631
+ class TestHessian(JacobianHessianTest):
632
+ jh_func = hessian
633
+
634
+ @pytest.mark.parametrize('shape', [(), (4,), (2, 4)])
635
+ def test_example(self, shape, xp):
636
+ rng = np.random.default_rng(458912319542)
637
+ m = 3
638
+ x = xp.asarray(rng.random((m,) + shape), dtype=xp.float64)
639
+ res = hessian(optimize.rosen, x)
640
+ if shape:
641
+ x = xp.reshape(x, (m, -1))
642
+ ref = xp.stack([optimize.rosen_hess(xi) for xi in x.T])
643
+ ref = xp.moveaxis(ref, 0, -1)
644
+ ref = xp.reshape(ref, (m, m,) + shape)
645
+ else:
646
+ ref = optimize.rosen_hess(x)
647
+ xp_assert_close(res.ddf, ref, atol=1e-8)
648
+
649
+ # # Removed symmetry enforcement; consider adding back in as a feature
650
+ # # check symmetry
651
+ # for key in ['ddf', 'error', 'nfev', 'success', 'status']:
652
+ # assert_equal(res[key], np.swapaxes(res[key], 0, 1))
653
+
654
+ def test_float32(self, xp):
655
+ rng = np.random.default_rng(458912319542)
656
+ x = xp.asarray(rng.random(3), dtype=xp.float32)
657
+ res = hessian(optimize.rosen, x)
658
+ ref = optimize.rosen_hess(x)
659
+ mask = (ref != 0)
660
+ xp_assert_close(res.ddf[mask], ref[mask])
661
+ atol = 1e-2 * xp.abs(xp.min(ref[mask]))
662
+ xp_assert_close(res.ddf[~mask], ref[~mask], atol=atol)
663
+
664
+ def test_nfev(self, xp):
665
+ z = xp.asarray([0.5, 0.25])
666
+ xp_test = array_namespace(z)
667
+
668
+ def f1(z):
669
+ x, y = xp_test.broadcast_arrays(*z)
670
+ f1.nfev = f1.nfev + (math.prod(x.shape[2:]) if x.ndim > 2 else 1)
671
+ return xp.sin(x) * y ** 3
672
+ f1.nfev = 0
673
+
674
+
675
+ res = hessian(f1, z, initial_step=10)
676
+ f1.nfev = 0
677
+ res00 = hessian(lambda x: f1([x[0], z[1]]), z[0:1], initial_step=10)
678
+ assert res.nfev[0, 0] == f1.nfev == res00.nfev[0, 0]
679
+
680
+ f1.nfev = 0
681
+ res11 = hessian(lambda y: f1([z[0], y[0]]), z[1:2], initial_step=10)
682
+ assert res.nfev[1, 1] == f1.nfev == res11.nfev[0, 0]
683
+
684
+ # Removed symmetry enforcement; consider adding back in as a feature
685
+ # assert_equal(res.nfev, res.nfev.T) # check symmetry
686
+ # assert np.unique(res.nfev).size == 3
687
+
688
+
689
+ @pytest.mark.thread_unsafe
690
+ @pytest.mark.skip_xp_backends(np_only=True,
691
+ reason='Python list input uses NumPy backend')
692
+ def test_small_rtol_warning(self, xp):
693
+ message = 'The specified `rtol=1e-15`, but...'
694
+ with pytest.warns(RuntimeWarning, match=message):
695
+ hessian(xp.sin, [1.], tolerances=dict(rtol=1e-15))
infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (930 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/__pycache__/_arffread.cpython-310.pyc ADDED
Binary file (21.9 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/nodata.arff ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION iris
2
+
3
+ @ATTRIBUTE sepallength REAL
4
+ @ATTRIBUTE sepalwidth REAL
5
+ @ATTRIBUTE petallength REAL
6
+ @ATTRIBUTE petalwidth REAL
7
+ @ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica}
8
+
9
+ @DATA
10
+
11
+ % This file has no data
infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test10.arff ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/arff/tests/data/test2.arff ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @RELATION test2
2
+
3
+ @ATTRIBUTE attr0 REAL
4
+ @ATTRIBUTE attr1 real
5
+ @ATTRIBUTE attr2 integer
6
+ @ATTRIBUTE attr3 Integer
7
+ @ATTRIBUTE attr4 Numeric
8
+ @ATTRIBUTE attr5 numeric
9
+ @ATTRIBUTE attr6 string
10
+ @ATTRIBUTE attr7 STRING
11
+ @ATTRIBUTE attr8 {bla}
12
+ @ATTRIBUTE attr9 {bla, bla}
13
+
14
+ @DATA
15
+ 0.1, 0.2, 0.3, 0.4,class1
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat ADDED
Binary file (171 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat ADDED
Binary file (173 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat ADDED
Binary file (283 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat ADDED
Binary file (228 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat ADDED
Binary file (228 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat ADDED
Binary file (276 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat ADDED
Binary file (672 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsimplecell.mat ADDED
Binary file (220 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat ADDED
Binary file (294 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat ADDED
Binary file (314 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/__init__.py ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =======================================
3
+ Signal processing (:mod:`scipy.signal`)
4
+ =======================================
5
+
6
+ Convolution
7
+ ===========
8
+
9
+ .. autosummary::
10
+ :toctree: generated/
11
+
12
+ convolve -- N-D convolution.
13
+ correlate -- N-D correlation.
14
+ fftconvolve -- N-D convolution using the FFT.
15
+ oaconvolve -- N-D convolution using the overlap-add method.
16
+ convolve2d -- 2-D convolution (more options).
17
+ correlate2d -- 2-D correlation (more options).
18
+ sepfir2d -- Convolve with a 2-D separable FIR filter.
19
+ choose_conv_method -- Chooses faster of FFT and direct convolution methods.
20
+ correlation_lags -- Determines lag indices for 1D cross-correlation.
21
+
22
+ B-splines
23
+ =========
24
+
25
+ .. autosummary::
26
+ :toctree: generated/
27
+
28
+ gauss_spline -- Gaussian approximation to the B-spline basis function.
29
+ cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
30
+ qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
31
+ cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
32
+ qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
33
+ cspline1d_eval -- Evaluate a cubic spline at the given points.
34
+ qspline1d_eval -- Evaluate a quadratic spline at the given points.
35
+ spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
36
+
37
+ Filtering
38
+ =========
39
+
40
+ .. autosummary::
41
+ :toctree: generated/
42
+
43
+ order_filter -- N-D order filter.
44
+ medfilt -- N-D median filter.
45
+ medfilt2d -- 2-D median filter (faster).
46
+ wiener -- N-D Wiener filter.
47
+
48
+ symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
49
+ symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
50
+ lfilter -- 1-D FIR and IIR digital linear filtering.
51
+ lfiltic -- Construct initial conditions for `lfilter`.
52
+ lfilter_zi -- Compute an initial state zi for the lfilter function that
53
+ -- corresponds to the steady state of the step response.
54
+ filtfilt -- A forward-backward filter.
55
+ savgol_filter -- Filter a signal using the Savitzky-Golay filter.
56
+
57
+ deconvolve -- 1-D deconvolution using lfilter.
58
+
59
+ sosfilt -- 1-D IIR digital linear filtering using
60
+ -- a second-order sections filter representation.
61
+ sosfilt_zi -- Compute an initial state zi for the sosfilt function that
62
+ -- corresponds to the steady state of the step response.
63
+ sosfiltfilt -- A forward-backward filter for second-order sections.
64
+ hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
65
+ hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
66
+ envelope -- Compute the envelope of a real- or complex-valued signal.
67
+
68
+ decimate -- Downsample a signal.
69
+ detrend -- Remove linear and/or constant trends from data.
70
+ resample -- Resample using Fourier method.
71
+ resample_poly -- Resample using polyphase filtering method.
72
+ upfirdn -- Upsample, apply FIR filter, downsample.
73
+
74
+ Filter design
75
+ =============
76
+
77
+ .. autosummary::
78
+ :toctree: generated/
79
+
80
+ bilinear -- Digital filter from an analog filter using
81
+ -- the bilinear transform.
82
+ bilinear_zpk -- Digital filter from an analog filter using
83
+ -- the bilinear transform.
84
+ findfreqs -- Find array of frequencies for computing filter response.
85
+ firls -- FIR filter design using least-squares error minimization.
86
+ firwin -- Windowed FIR filter design, with frequency response
87
+ -- defined as pass and stop bands.
88
+ firwin2 -- Windowed FIR filter design, with arbitrary frequency
89
+ -- response.
90
+ freqs -- Analog filter frequency response from TF coefficients.
91
+ freqs_zpk -- Analog filter frequency response from ZPK coefficients.
92
+ freqz -- Digital filter frequency response from TF coefficients.
93
+ freqz_sos -- Digital filter frequency response for SOS format filter.
94
+ freqz_zpk -- Digital filter frequency response from ZPK coefficients.
95
+ gammatone -- FIR and IIR gammatone filter design.
96
+ group_delay -- Digital filter group delay.
97
+ iirdesign -- IIR filter design given bands and gains.
98
+ iirfilter -- IIR filter design given order and critical frequencies.
99
+ kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
100
+ -- the number of taps and the transition width at
101
+ -- discontinuities in the frequency response.
102
+ kaiser_beta -- Compute the Kaiser parameter beta, given the desired
103
+ -- FIR filter attenuation.
104
+ kaiserord -- Design a Kaiser window to limit ripple and width of
105
+ -- transition region.
106
+ minimum_phase -- Convert a linear phase FIR filter to minimum phase.
107
+ savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
108
+ -- filter.
109
+ remez -- Optimal FIR filter design.
110
+
111
+ unique_roots -- Unique roots and their multiplicities.
112
+ residue -- Partial fraction expansion of b(s) / a(s).
113
+ residuez -- Partial fraction expansion of b(z) / a(z).
114
+ invres -- Inverse partial fraction expansion for analog filter.
115
+ invresz -- Inverse partial fraction expansion for digital filter.
116
+ BadCoefficients -- Warning on badly conditioned filter coefficients.
117
+
118
+ Lower-level filter design functions:
119
+
120
+ .. autosummary::
121
+ :toctree: generated/
122
+
123
+ abcd_normalize -- Check state-space matrices and ensure they are rank-2.
124
+ band_stop_obj -- Band Stop Objective Function for order minimization.
125
+ besselap -- Return (z,p,k) for analog prototype of Bessel filter.
126
+ buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
127
+ cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
128
+ cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
129
+ ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
130
+ lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
131
+ lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter.
132
+ lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
133
+ lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter.
134
+ lp2hp -- Transform a lowpass filter prototype to a highpass filter.
135
+ lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter.
136
+ lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
137
+ lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter.
138
+ normalize -- Normalize polynomial representation of a transfer function.
139
+
140
+
141
+
142
+ Matlab-style IIR filter design
143
+ ==============================
144
+
145
+ .. autosummary::
146
+ :toctree: generated/
147
+
148
+ butter -- Butterworth
149
+ buttord
150
+ cheby1 -- Chebyshev Type I
151
+ cheb1ord
152
+ cheby2 -- Chebyshev Type II
153
+ cheb2ord
154
+ ellip -- Elliptic (Cauer)
155
+ ellipord
156
+ bessel -- Bessel (no order selection available -- try buttord)
157
+ iirnotch -- Design second-order IIR notch digital filter.
158
+ iirpeak -- Design second-order IIR peak (resonant) digital filter.
159
+ iircomb -- Design IIR comb filter.
160
+
161
+ Continuous-time linear systems
162
+ ==============================
163
+
164
+ .. autosummary::
165
+ :toctree: generated/
166
+
167
+ lti -- Continuous-time linear time invariant system base class.
168
+ StateSpace -- Linear time invariant system in state space form.
169
+ TransferFunction -- Linear time invariant system in transfer function form.
170
+ ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
171
+ lsim -- Continuous-time simulation of output to linear system.
172
+ impulse -- Impulse response of linear, time-invariant (LTI) system.
173
+ step -- Step response of continuous-time LTI system.
174
+ freqresp -- Frequency response of a continuous-time LTI system.
175
+ bode -- Bode magnitude and phase data (continuous-time LTI).
176
+
177
+ Discrete-time linear systems
178
+ ============================
179
+
180
+ .. autosummary::
181
+ :toctree: generated/
182
+
183
+ dlti -- Discrete-time linear time invariant system base class.
184
+ StateSpace -- Linear time invariant system in state space form.
185
+ TransferFunction -- Linear time invariant system in transfer function form.
186
+ ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
187
+ dlsim -- Simulation of output to a discrete-time linear system.
188
+ dimpulse -- Impulse response of a discrete-time LTI system.
189
+ dstep -- Step response of a discrete-time LTI system.
190
+ dfreqresp -- Frequency response of a discrete-time LTI system.
191
+ dbode -- Bode magnitude and phase data (discrete-time LTI).
192
+
193
+ LTI representations
194
+ ===================
195
+
196
+ .. autosummary::
197
+ :toctree: generated/
198
+
199
+ tf2zpk -- Transfer function to zero-pole-gain.
200
+ tf2sos -- Transfer function to second-order sections.
201
+ tf2ss -- Transfer function to state-space.
202
+ zpk2tf -- Zero-pole-gain to transfer function.
203
+ zpk2sos -- Zero-pole-gain to second-order sections.
204
+ zpk2ss -- Zero-pole-gain to state-space.
205
+ ss2tf -- State-space to transfer function.
206
+ ss2zpk -- State-space to pole-zero-gain.
207
+ sos2zpk -- Second-order sections to zero-pole-gain.
208
+ sos2tf -- Second-order sections to transfer function.
209
+ cont2discrete -- Continuous-time to discrete-time LTI conversion.
210
+ place_poles -- Pole placement.
211
+
212
+ Waveforms
213
+ =========
214
+
215
+ .. autosummary::
216
+ :toctree: generated/
217
+
218
+ chirp -- Frequency swept cosine signal, with several freq functions.
219
+ gausspulse -- Gaussian modulated sinusoid.
220
+ max_len_seq -- Maximum length sequence.
221
+ sawtooth -- Periodic sawtooth.
222
+ square -- Square wave.
223
+ sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial.
224
+ unit_impulse -- Discrete unit impulse.
225
+
226
+ Window functions
227
+ ================
228
+
229
+ For window functions, see the `scipy.signal.windows` namespace.
230
+
231
+ In the `scipy.signal` namespace, there is a convenience function to
232
+ obtain these windows by name:
233
+
234
+ .. autosummary::
235
+ :toctree: generated/
236
+
237
+ get_window -- Return a window of a given length and type.
238
+
239
+ Peak finding
240
+ ============
241
+
242
+ .. autosummary::
243
+ :toctree: generated/
244
+
245
+ argrelmin -- Calculate the relative minima of data.
246
+ argrelmax -- Calculate the relative maxima of data.
247
+ argrelextrema -- Calculate the relative extrema of data.
248
+ find_peaks -- Find a subset of peaks inside a signal.
249
+ find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation.
250
+ peak_prominences -- Calculate the prominence of each peak in a signal.
251
+ peak_widths -- Calculate the width of each peak in a signal.
252
+
253
+ Spectral analysis
254
+ =================
255
+
256
+ .. autosummary::
257
+ :toctree: generated/
258
+
259
+ periodogram -- Compute a (modified) periodogram.
260
+ welch -- Compute a periodogram using Welch's method.
261
+ csd -- Compute the cross spectral density, using Welch's method.
262
+ coherence -- Compute the magnitude squared coherence, using Welch's method.
263
+ spectrogram -- Compute the spectrogram (legacy).
264
+ lombscargle -- Computes the Lomb-Scargle periodogram.
265
+ vectorstrength -- Computes the vector strength.
266
+ ShortTimeFFT -- Interface for calculating the \
267
+ :ref:`Short Time Fourier Transform <tutorial_stft>` and \
268
+ its inverse.
269
+ stft -- Compute the Short Time Fourier Transform (legacy).
270
+ istft -- Compute the Inverse Short Time Fourier Transform (legacy).
271
+ check_COLA -- Check the COLA constraint for iSTFT reconstruction.
272
+ check_NOLA -- Check the NOLA constraint for iSTFT reconstruction.
273
+
274
+ Chirp Z-transform and Zoom FFT
275
+ ============================================
276
+
277
+ .. autosummary::
278
+ :toctree: generated/
279
+
280
+ czt - Chirp z-transform convenience function
281
+ zoom_fft - Zoom FFT convenience function
282
+ CZT - Chirp z-transform function generator
283
+ ZoomFFT - Zoom FFT function generator
284
+ czt_points - Output the z-plane points sampled by a chirp z-transform
285
+
286
+ The functions are simpler to use than the classes, but are less efficient when
287
+ using the same transform on many arrays of the same length, since they
288
+ repeatedly generate the same chirp signal with every call. In these cases,
289
+ use the classes to create a reusable function instead.
290
+
291
+ """
292
+
293
+ from . import _sigtools, windows
294
+ from ._waveforms import *
295
+ from ._max_len_seq import max_len_seq
296
+ from ._upfirdn import upfirdn
297
+
298
+ from ._spline import (
299
+ sepfir2d
300
+ )
301
+
302
+ from ._spline_filters import *
303
+ from ._filter_design import *
304
+ from ._fir_filter_design import *
305
+ from ._ltisys import *
306
+ from ._lti_conversion import *
307
+ from ._signaltools import *
308
+ from ._savitzky_golay import savgol_coeffs, savgol_filter
309
+ from ._spectral_py import *
310
+ from ._short_time_fft import *
311
+ from ._peak_finding import *
312
+ from ._czt import *
313
+ from .windows import get_window # keep this one in signal namespace
314
+
315
+ # Deprecated namespaces, to be removed in v2.0.0
316
+ from . import (
317
+ bsplines, filter_design, fir_filter_design, lti_conversion, ltisys,
318
+ spectral, signaltools, waveforms, wavelets, spline
319
+ )
320
+
321
+ __all__ = [
322
+ s for s in dir() if not s.startswith("_")
323
+ ]
324
+
325
+ from scipy._lib._testutils import PytestTester
326
+ test = PytestTester(__name__)
327
+ del PytestTester
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_arraytools.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions for acting on a axis of an array.
3
+ """
4
+ import numpy as np
5
+
6
+
7
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
    """Return the slice ``slice(start, stop, step)`` of `a` along `axis`.

    Parameters
    ----------
    a : numpy.ndarray
        The array to be sliced.
    start, stop, step : int or None
        Arguments forwarded to ``slice(start, stop, step)``.
    axis : int, optional
        The axis of `a` along which the slice is taken.  Default is -1.

    Notes
    -----
    Because the arguments are passed through ``slice()``, selecting a
    single index ``k`` requires ``start=k, stop=k+1``; the resulting axis
    keeps length 1 (use ``numpy.squeeze`` to drop it).

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal._arraytools import axis_slice
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> axis_slice(a, start=0, stop=1, axis=1)
    array([[1],
           [4],
           [7]])
    >>> axis_slice(a, start=1, axis=0)
    array([[4, 5, 6],
           [7, 8, 9]])
    """
    # Full slices on every axis except the requested one.
    indexer = [slice(None)] * a.ndim
    indexer[axis] = slice(start, stop, step)
    return a[tuple(indexer)]
47
+
48
+
49
def axis_reverse(a, axis=-1):
    """Return `a` with its 1-D slices along `axis` reversed.

    Equivalent to ``axis_slice(a, step=-1, axis=axis)``.
    """
    return axis_slice(a, axis=axis, step=-1)
55
+
56
+
57
def odd_ext(x, n, axis=-1):
    """
    Odd extension at the boundaries of an array.

    Generate a new ndarray by extending `x` at both ends of `axis` with
    its point-reflected ("180 degree rotated") boundary values.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of elements by which to extend `x` at each end of the axis.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal._arraytools import odd_ext
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> odd_ext(a, 2)
    array([[-1,  0,  1,  2,  3,  4,  5,  6,  7],
           [-4, -1,  0,  1,  4,  9, 16, 23, 28]])
    """
    if n < 1:
        return x
    last = x.shape[axis] - 1
    if n > last:
        raise ValueError("The extension length n (%d) is too big. "
                         "It must not exceed x.shape[axis]-1, which is %d."
                         % (n, last))

    def _view(start, stop, step=None):
        # Slice `x` along `axis` only; all other axes are taken in full.
        idx = [slice(None)] * x.ndim
        idx[axis] = slice(start, stop, step)
        return x[tuple(idx)]

    left_end = _view(0, 1)
    left_ext = _view(n, 0, -1)          # first n values, reversed, sans endpoint
    right_end = _view(-1, None)
    right_ext = _view(-2, -(n + 2), -1)  # last n values, reversed, sans endpoint
    # Point-reflect about each endpoint: 2*end - mirrored values.
    return np.concatenate((2 * left_end - left_ext,
                           x,
                           2 * right_end - right_ext),
                          axis=axis)
108
+
109
+
110
def even_ext(x, n, axis=-1):
    """
    Even extension at the boundaries of an array.

    Generate a new ndarray by extending `x` at both ends of `axis` with a
    mirror image of its boundary values (the endpoints themselves are not
    repeated).

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of elements by which to extend `x` at each end of the axis.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal._arraytools import even_ext
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> even_ext(a, 2)
    array([[ 3,  2,  1,  2,  3,  4,  5,  4,  3],
           [ 4,  1,  0,  1,  4,  9, 16,  9,  4]])
    """
    if n < 1:
        return x
    last = x.shape[axis] - 1
    if n > last:
        raise ValueError("The extension length n (%d) is too big. "
                         "It must not exceed x.shape[axis]-1, which is %d."
                         % (n, last))

    def _view(start, stop, step):
        # Slice `x` along `axis` only; all other axes are taken in full.
        idx = [slice(None)] * x.ndim
        idx[axis] = slice(start, stop, step)
        return x[tuple(idx)]

    mirror_left = _view(n, 0, -1)            # reflection of the first n values
    mirror_right = _view(-2, -(n + 2), -1)   # reflection of the last n values
    return np.concatenate((mirror_left, x, mirror_right), axis=axis)
158
+
159
+
160
def const_ext(x, n, axis=-1):
    """
    Constant extension at the boundaries of an array.

    Generate a new ndarray that is a constant extension of `x` along an
    axis: the extension repeats the first and last values of the axis.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of elements by which to extend `x` at each end of the axis.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal._arraytools import const_ext
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> const_ext(a, 2)
    array([[ 1,  1,  1,  2,  3,  4,  5,  5,  5],
           [ 0,  0,  0,  1,  4,  9, 16, 16, 16]])
    """
    if n < 1:
        return x
    idx = [slice(None)] * x.ndim
    idx[axis] = slice(0, 1)
    left_end = x[tuple(idx)]
    idx[axis] = slice(-1, None)
    right_end = x[tuple(idx)]
    # np.repeat broadcasts the edge values directly; unlike multiplying by
    # an array of ones, it needs no intermediate allocation and works for
    # any dtype that supports concatenation, not just numeric ones.
    left_ext = np.repeat(left_end, n, axis=axis)
    right_ext = np.repeat(right_end, n, axis=axis)
    return np.concatenate((left_ext, x, right_ext), axis=axis)
213
+
214
+
215
def zero_ext(x, n, axis=-1):
    """
    Zero padding at the boundaries of an array.

    Generate a new ndarray that is a zero-padded extension of `x` along
    an axis.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        The number of zeros to prepend and append along the axis.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal._arraytools import zero_ext
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> zero_ext(a, 2)
    array([[ 0,  0,  1,  2,  3,  4,  5,  0,  0],
           [ 0,  0,  0,  1,  4,  9, 16,  0,  0]])
    """
    if n < 1:
        return x
    # Pad only the requested axis, n zeros on each side.
    pad_width = [(0, 0)] * x.ndim
    pad_width[axis] = (n, n)
    return np.pad(x, pad_width, mode='constant')
248
+
249
+
250
+ def _validate_fs(fs, allow_none=True):
251
+ """
252
+ Check if the given sampling frequency is a scalar and raises an exception
253
+ otherwise. If allow_none is False, also raises an exception for none
254
+ sampling rates. Returns the sampling frequency as float or none if the
255
+ input is none.
256
+ """
257
+ if fs is None:
258
+ if not allow_none:
259
+ raise ValueError("Sampling frequency can not be none.")
260
+ else: # should be float
261
+ if not np.isscalar(fs):
262
+ raise ValueError("Sampling frequency fs must be a single scalar.")
263
+ fs = float(fs)
264
+ return fs
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_czt.py ADDED
@@ -0,0 +1,575 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This program is public domain
2
+ # Authors: Paul Kienzle, Nadav Horesh
3
+ """
4
+ Chirp z-transform.
5
+
6
+ We provide two interfaces to the chirp z-transform: an object interface
7
+ which precalculates part of the transform and can be applied efficiently
8
+ to many different data sets, and a functional interface which is applied
9
+ only to the given data set.
10
+
11
+ Transforms
12
+ ----------
13
+
14
+ CZT : callable (x, axis=-1) -> array
15
+ Define a chirp z-transform that can be applied to different signals.
16
+ ZoomFFT : callable (x, axis=-1) -> array
17
+ Define a Fourier transform on a range of frequencies.
18
+
19
+ Functions
20
+ ---------
21
+
22
+ czt : array
23
+ Compute the chirp z-transform for a signal.
24
+ zoom_fft : array
25
+ Compute the Fourier transform on a range of frequencies.
26
+ """
27
+
28
+ import cmath
29
+ import numbers
30
+ import numpy as np
31
+ from numpy import pi, arange
32
+ from scipy.fft import fft, ifft, next_fast_len
33
+
34
+ __all__ = ['czt', 'zoom_fft', 'CZT', 'ZoomFFT', 'czt_points']
35
+
36
+
37
+ def _validate_sizes(n, m):
38
+ if n < 1 or not isinstance(n, numbers.Integral):
39
+ raise ValueError('Invalid number of CZT data '
40
+ f'points ({n}) specified. '
41
+ 'n must be positive and integer type.')
42
+
43
+ if m is None:
44
+ m = n
45
+ elif m < 1 or not isinstance(m, numbers.Integral):
46
+ raise ValueError('Invalid number of CZT output '
47
+ f'points ({m}) specified. '
48
+ 'm must be positive and integer type.')
49
+
50
+ return m
51
+
52
+
53
def czt_points(m, w=None, a=1+0j):
    """
    Return the points at which the chirp z-transform is computed.

    Parameters
    ----------
    m : int
        The number of points desired.
    w : complex, optional
        The ratio between points in each step.
        Defaults to equally spaced points around the entire unit circle.
    a : complex, optional
        The starting point in the complex plane. Default is 1+0j.

    Returns
    -------
    out : ndarray
        The points in the Z plane at which `CZT` samples the z-transform,
        when called with arguments `m`, `w`, and `a`, as complex numbers.

    See Also
    --------
    CZT : Class that creates a callable chirp z-transform function.
    czt : Convenience function for quickly calculating CZT.

    Examples
    --------
    Plot the points of a 16-point FFT:

    >>> import numpy as np
    >>> from scipy.signal import czt_points
    >>> points = czt_points(16)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(points.real, points.imag, 'o')
    >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
    >>> plt.axis('equal')
    >>> plt.show()

    and a 91-point logarithmic spiral that crosses the unit circle:

    >>> m, w, a = 91, 0.995*np.exp(-1j*np.pi*.05), 0.8*np.exp(1j*np.pi/6)
    >>> points = czt_points(m, w, a)
    >>> plt.plot(points.real, points.imag, 'o')
    >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
    >>> plt.axis('equal')
    >>> plt.show()
    """
    # Validate m only (inlined from _validate_sizes with a trivial n).
    if m is None:
        m = 1
    elif m < 1 or not isinstance(m, numbers.Integral):
        raise ValueError('Invalid number of CZT output '
                         f'points ({m}) specified. '
                         'm must be positive and integer type.')

    exponents = np.arange(m)
    a = 1.0 * a  # promote to at least float

    if w is None:
        # Default: m equally spaced points around the unit circle (FFT points).
        return a * np.exp(2j * np.pi * exponents / m)

    w = 1.0 * w  # promote to at least float
    return a * w**-exponents
113
+
114
+
115
class CZT:
    """
    Create a callable chirp z-transform function.

    Transform to compute the frequency response around a spiral.
    Objects of this class are callables which can compute the
    chirp z-transform on their inputs. This object precalculates the constant
    chirps used in the given transform.

    Parameters
    ----------
    n : int
        The size of the signal.
    m : int, optional
        The number of output points desired. Default is `n`.
    w : complex, optional
        The ratio between points in each step. This must be precise or the
        accumulated error will degrade the tail of the output sequence.
        Defaults to equally spaced points around the entire unit circle.
    a : complex, optional
        The starting point in the complex plane. Default is 1+0j.

    Returns
    -------
    f : CZT
        Callable object ``f(x, axis=-1)`` for computing the chirp z-transform
        on `x`.

    See Also
    --------
    czt : Convenience function for quickly calculating CZT.
    ZoomFFT : Class that creates a callable partial FFT function.

    Notes
    -----
    The defaults are chosen such that ``f(x)`` is equivalent to
    ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, m)`` is equivalent to
    ``fft.fft(x, m)``.

    If `w` does not lie on the unit circle, then the transform will be
    around a spiral with exponentially-increasing radius. Regardless,
    angle will increase linearly.

    For transforms that do lie on the unit circle, accuracy is better when
    using `ZoomFFT`, since any numerical error in `w` is
    accumulated for long data lengths, drifting away from the unit circle.

    The chirp z-transform can be faster than an equivalent FFT with
    zero padding. Try it with your own array sizes to see.

    However, the chirp z-transform is considerably less precise than the
    equivalent zero-padded FFT.

    As this CZT is implemented using the Bluestein algorithm, it can compute
    large prime-length Fourier transforms in O(N log N) time, rather than the
    O(N**2) time required by the direct DFT calculation. (`scipy.fft` also
    uses Bluestein's algorithm.)

    (The name "chirp z-transform" comes from the use of a chirp in the
    Bluestein algorithm. It does not decompose signals into chirps, like
    other transforms with "chirp" in the name.)

    References
    ----------
    .. [1] Leo I. Bluestein, "A linear filtering approach to the computation
           of the discrete Fourier transform," Northeast Electronics Research
           and Engineering Meeting Record 10, 218-219 (1968).
    .. [2] Rabiner, Schafer, and Rader, "The chirp z-transform algorithm and
           its application," Bell Syst. Tech. J. 48, 1249-1292 (1969).

    Examples
    --------
    Compute multiple prime-length FFTs:

    >>> from scipy.signal import CZT
    >>> import numpy as np
    >>> a = np.random.rand(7)
    >>> b = np.random.rand(7)
    >>> c = np.random.rand(7)
    >>> czt_7 = CZT(n=7)
    >>> A = czt_7(a)
    >>> B = czt_7(b)
    >>> C = czt_7(c)

    Display the points at which the FFT is calculated:

    >>> czt_7.points()
    array([ 1.00000000+0.j        ,  0.62348980+0.78183148j,
           -0.22252093+0.97492791j, -0.90096887+0.43388374j,
           -0.90096887-0.43388374j, -0.22252093-0.97492791j,
            0.62348980-0.78183148j])
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(czt_7.points().real, czt_7.points().imag, 'o')
    >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
    >>> plt.axis('equal')
    >>> plt.show()
    """

    def __init__(self, n, m=None, w=None, a=1+0j):
        # Validate sizes; m defaults to n.
        m = _validate_sizes(n, m)

        # k indexes the chirp samples.  The dtype is chosen so that k**2
        # (up to max(m, n)**2, negated below via w**-k) fits exactly in an
        # integer type, keeping the exponents exact.
        k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2))

        if w is None:
            # Nothing specified, default to FFT-like
            w = cmath.exp(-2j*pi/m)
            # exp(-1j*pi*q/m) is periodic in q with period 2*m, so reducing
            # k**2 modulo 2*m before the complex exponential keeps the phase
            # argument small and avoids precision loss for large k.
            wk2 = np.exp(-(1j * pi * ((k**2) % (2*m))) / m)
        else:
            # w specified
            wk2 = w**(k**2/2.)

        a = 1.0 * a  # at least float

        self.w, self.a = w, a
        self.m, self.n = m, n

        # Bluestein: the CZT becomes a convolution of length n + m - 1,
        # evaluated by FFT at the next fast transform length.
        nfft = next_fast_len(n + m - 1)
        # Input pre-multiplier chirp: a**-k * w**(k**2/2), length n.
        self._Awk2 = a**-k[:n] * wk2[:n]
        self._nfft = nfft
        # FFT of the reciprocal chirp (the convolution kernel), precomputed.
        self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft)
        # Output post-multiplier chirp, length m.
        self._wk2 = wk2[:m]
        # Slice selecting the m valid convolution outputs.
        self._yidx = slice(n-1, n+m-1)

    def __call__(self, x, *, axis=-1):
        """
        Calculate the chirp z-transform of a signal.

        Parameters
        ----------
        x : array
            The signal to transform.
        axis : int, optional
            Axis over which to compute the FFT. If not given, the last axis is
            used.

        Returns
        -------
        out : ndarray
            An array of the same dimensions as `x`, but with the length of the
            transformed axis set to `m`.
        """
        x = np.asarray(x)
        if x.shape[axis] != self.n:
            raise ValueError(f"CZT defined for length {self.n}, not "
                             f"{x.shape[axis]}")
        # Calculate transpose coordinates, to allow operation on any given axis
        trnsp = np.arange(x.ndim)
        trnsp[[axis, -1]] = [-1, axis]
        x = x.transpose(*trnsp)
        # Bluestein convolution: pre-chirp, FFT-multiply by the precomputed
        # kernel, inverse FFT, then post-chirp the m valid outputs.
        y = ifft(self._Fwk2 * fft(x*self._Awk2, self._nfft))
        y = y[..., self._yidx] * self._wk2
        # Swapping the same two axes again restores the original layout.
        return y.transpose(*trnsp)

    def points(self):
        """
        Return the points at which the chirp z-transform is computed.
        """
        return czt_points(self.m, self.w, self.a)
273
+
274
+
275
class ZoomFFT(CZT):
    """
    Create a callable zoom FFT transform function.

    This is a specialization of the chirp z-transform (`CZT`) for a set of
    equally-spaced frequencies around the unit circle, used to calculate a
    section of the FFT more efficiently than calculating the entire FFT and
    truncating.

    Parameters
    ----------
    n : int
        The size of the signal.
    fn : array_like
        A length-2 sequence [`f1`, `f2`] giving the frequency range, or a
        scalar, for which the range [0, `fn`] is assumed.
    m : int, optional
        The number of points to evaluate. Default is `n`.
    fs : float, optional
        The sampling frequency. If ``fs=10`` represented 10 kHz, for example,
        then `f1` and `f2` would also be given in kHz.
        The default sampling frequency is 2, so `f1` and `f2` should be
        in the range [0, 1] to keep the transform below the Nyquist
        frequency.
    endpoint : bool, optional
        If True, `f2` is the last sample. Otherwise, it is not included.
        Default is False.

    Returns
    -------
    f : ZoomFFT
        Callable object ``f(x, axis=-1)`` for computing the zoom FFT on `x`.

    See Also
    --------
    zoom_fft : Convenience function for calculating a zoom FFT.

    Notes
    -----
    The defaults are chosen such that ``f(x, 2)`` is equivalent to
    ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, 2, m)`` is equivalent to
    ``fft.fft(x, m)``.

    Sampling frequency is 1/dt, the time step between samples in the
    signal `x`. The unit circle corresponds to frequencies from 0 up
    to the sampling frequency. The default sampling frequency of 2
    means that `f1`, `f2` values up to the Nyquist frequency are in the
    range [0, 1). For `f1`, `f2` values expressed in radians, a sampling
    frequency of 2*pi should be used.

    Remember that a zoom FFT can only interpolate the points of the existing
    FFT. It cannot help to resolve two separate nearby frequencies.
    Frequency resolution can only be increased by increasing acquisition
    time.

    These functions are implemented using Bluestein's algorithm (as is
    `scipy.fft`). [2]_

    References
    ----------
    .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its
           applications", pg 29 (1970)
           https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf
    .. [2] Leo I. Bluestein, "A linear filtering approach to the computation
           of the discrete Fourier transform," Northeast Electronics Research
           and Engineering Meeting Record 10, 218-219 (1968).

    Examples
    --------
    To plot the transform results use something like the following:

    >>> import numpy as np
    >>> from scipy.signal import ZoomFFT
    >>> t = np.linspace(0, 1, 1021)
    >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t)
    >>> f1, f2 = 5, 27
    >>> transform = ZoomFFT(len(x), [f1, f2], len(x), fs=1021)
    >>> X = transform(x)
    >>> f = np.linspace(f1, f2, len(x))
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(f, 20*np.log10(np.abs(X)))
    >>> plt.show()
    """

    def __init__(self, n, fn, m=None, *, fs=2, endpoint=False):
        # NOTE: does not call CZT.__init__; instead it fills in the same
        # precomputed attributes (_Awk2, _Fwk2, _wk2, _yidx, _nfft) that
        # CZT.__call__ consumes, using unit-circle chirps built directly
        # from the frequency range for better accuracy.
        m = _validate_sizes(n, m)

        # Exact integer index vector (dtype large enough to hold k**2).
        k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2))

        if np.size(fn) == 2:
            f1, f2 = fn
        elif np.size(fn) == 1:
            # Scalar fn means the range [0, fn].
            f1, f2 = 0.0, fn
        else:
            raise ValueError('fn must be a scalar or 2-length sequence')

        self.f1, self.f2, self.fs = f1, f2, fs

        # Fraction of the unit circle covered per output bin span.
        if endpoint:
            scale = ((f2 - f1) * m) / (fs * (m - 1))
        else:
            scale = (f2 - f1) / fs
        # Starting point on the unit circle at frequency f1.
        a = cmath.exp(2j * pi * f1/fs)
        # Chirp of squared phases for the Bluestein decomposition.
        wk2 = np.exp(-(1j * pi * scale * k**2) / m)

        self.w = cmath.exp(-2j*pi/m * scale)
        self.a = a
        self.m, self.n = m, n

        # Input pre-multiplier chirp (a**-k computed as a unit-circle
        # exponential rather than a repeated power, for accuracy).
        ak = np.exp(-2j * pi * f1/fs * k[:n])
        self._Awk2 = ak * wk2[:n]

        nfft = next_fast_len(n + m - 1)
        self._nfft = nfft
        # FFT of the reciprocal chirp (convolution kernel), precomputed.
        self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft)
        # Output post-multiplier chirp and valid-output slice.
        self._wk2 = wk2[:m]
        self._yidx = slice(n-1, n+m-1)
392
+
393
+
394
+ def czt(x, m=None, w=None, a=1+0j, *, axis=-1):
395
+ """
396
+ Compute the frequency response around a spiral in the Z plane.
397
+
398
+ Parameters
399
+ ----------
400
+ x : array
401
+ The signal to transform.
402
+ m : int, optional
403
+ The number of output points desired. Default is the length of the
404
+ input data.
405
+ w : complex, optional
406
+ The ratio between points in each step. This must be precise or the
407
+ accumulated error will degrade the tail of the output sequence.
408
+ Defaults to equally spaced points around the entire unit circle.
409
+ a : complex, optional
410
+ The starting point in the complex plane. Default is 1+0j.
411
+ axis : int, optional
412
+ Axis over which to compute the FFT. If not given, the last axis is
413
+ used.
414
+
415
+ Returns
416
+ -------
417
+ out : ndarray
418
+ An array of the same dimensions as `x`, but with the length of the
419
+ transformed axis set to `m`.
420
+
421
+ See Also
422
+ --------
423
+ CZT : Class that creates a callable chirp z-transform function.
424
+ zoom_fft : Convenience function for partial FFT calculations.
425
+
426
+ Notes
427
+ -----
428
+ The defaults are chosen such that ``signal.czt(x)`` is equivalent to
429
+ ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.czt(x, m)`` is
430
+ equivalent to ``fft.fft(x, m)``.
431
+
432
+ If the transform needs to be repeated, use `CZT` to construct a
433
+ specialized transform function which can be reused without
434
+ recomputing constants.
435
+
436
+ An example application is in system identification, repeatedly evaluating
437
+ small slices of the z-transform of a system, around where a pole is
438
+ expected to exist, to refine the estimate of the pole's true location. [1]_
439
+
440
+ References
441
+ ----------
442
+ .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its
443
+ applications", pg 20 (1970)
444
+ https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf
445
+
446
+ Examples
447
+ --------
448
+ Generate a sinusoid:
449
+
450
+ >>> import numpy as np
451
+ >>> f1, f2, fs = 8, 10, 200 # Hz
452
+ >>> t = np.linspace(0, 1, fs, endpoint=False)
453
+ >>> x = np.sin(2*np.pi*t*f2)
454
+ >>> import matplotlib.pyplot as plt
455
+ >>> plt.plot(t, x)
456
+ >>> plt.axis([0, 1, -1.1, 1.1])
457
+ >>> plt.show()
458
+
459
+ Its discrete Fourier transform has all of its energy in a single frequency
460
+ bin:
461
+
462
+ >>> from scipy.fft import rfft, rfftfreq
463
+ >>> from scipy.signal import czt, czt_points
464
+ >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x)))
465
+ >>> plt.margins(0, 0.1)
466
+ >>> plt.show()
467
+
468
+ However, if the sinusoid is logarithmically-decaying:
469
+
470
+ >>> x = np.exp(-t*f1) * np.sin(2*np.pi*t*f2)
471
+ >>> plt.plot(t, x)
472
+ >>> plt.axis([0, 1, -1.1, 1.1])
473
+ >>> plt.show()
474
+
475
+ the DFT will have spectral leakage:
476
+
477
+ >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x)))
478
+ >>> plt.margins(0, 0.1)
479
+ >>> plt.show()
480
+
481
+ While the DFT always samples the z-transform around the unit circle, the
482
+ chirp z-transform allows us to sample the Z-transform along any
483
+ logarithmic spiral, such as a circle with radius smaller than unity:
484
+
485
+ >>> M = fs // 2 # Just positive frequencies, like rfft
486
+ >>> a = np.exp(-f1/fs) # Starting point of the circle, radius < 1
487
+ >>> w = np.exp(-1j*np.pi/M) # "Step size" of circle
488
+ >>> points = czt_points(M + 1, w, a) # M + 1 to include Nyquist
489
+ >>> plt.plot(points.real, points.imag, '.')
490
+ >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
491
+ >>> plt.axis('equal'); plt.axis([-1.05, 1.05, -0.05, 1.05])
492
+ >>> plt.show()
493
+
494
+ With the correct radius, this transforms the decaying sinusoid (and others
495
+ with the same decay rate) without spectral leakage:
496
+
497
+ >>> z_vals = czt(x, M + 1, w, a) # Include Nyquist for comparison to rfft
498
+ >>> freqs = np.angle(points)*fs/(2*np.pi) # angle = omega, radius = sigma
499
+ >>> plt.plot(freqs, abs(z_vals))
500
+ >>> plt.margins(0, 0.1)
501
+ >>> plt.show()
502
+ """
503
+ x = np.asarray(x)
504
+ transform = CZT(x.shape[axis], m=m, w=w, a=a)
505
+ return transform(x, axis=axis)
506
+
507
+
508
+ def zoom_fft(x, fn, m=None, *, fs=2, endpoint=False, axis=-1):
509
+ """
510
+ Compute the DFT of `x` only for frequencies in range `fn`.
511
+
512
+ Parameters
513
+ ----------
514
+ x : array
515
+ The signal to transform.
516
+ fn : array_like
517
+ A length-2 sequence [`f1`, `f2`] giving the frequency range, or a
518
+ scalar, for which the range [0, `fn`] is assumed.
519
+ m : int, optional
520
+ The number of points to evaluate. The default is the length of `x`.
521
+ fs : float, optional
522
+ The sampling frequency. If ``fs=10`` represented 10 kHz, for example,
523
+ then `f1` and `f2` would also be given in kHz.
524
+ The default sampling frequency is 2, so `f1` and `f2` should be
525
+ in the range [0, 1] to keep the transform below the Nyquist
526
+ frequency.
527
+ endpoint : bool, optional
528
+ If True, `f2` is the last sample. Otherwise, it is not included.
529
+ Default is False.
530
+ axis : int, optional
531
+ Axis over which to compute the FFT. If not given, the last axis is
532
+ used.
533
+
534
+ Returns
535
+ -------
536
+ out : ndarray
537
+ The transformed signal. The Fourier transform will be calculated
538
+ at the points f1, f1+df, f1+2df, ..., f2, where df=(f2-f1)/m.
539
+
540
+ See Also
541
+ --------
542
+ ZoomFFT : Class that creates a callable partial FFT function.
543
+
544
+ Notes
545
+ -----
546
+ The defaults are chosen such that ``signal.zoom_fft(x, 2)`` is equivalent
547
+ to ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.zoom_fft(x, 2, m)``
548
+ is equivalent to ``fft.fft(x, m)``.
549
+
550
+ To graph the magnitude of the resulting transform, use::
551
+
552
+ plot(linspace(f1, f2, m, endpoint=False), abs(zoom_fft(x, [f1, f2], m)))
553
+
554
+ If the transform needs to be repeated, use `ZoomFFT` to construct
555
+ a specialized transform function which can be reused without
556
+ recomputing constants.
557
+
558
+ Examples
559
+ --------
560
+ To plot the transform results use something like the following:
561
+
562
+ >>> import numpy as np
563
+ >>> from scipy.signal import zoom_fft
564
+ >>> t = np.linspace(0, 1, 1021)
565
+ >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t)
566
+ >>> f1, f2 = 5, 27
567
+ >>> X = zoom_fft(x, [f1, f2], len(x), fs=1021)
568
+ >>> f = np.linspace(f1, f2, len(x))
569
+ >>> import matplotlib.pyplot as plt
570
+ >>> plt.plot(f, 20*np.log10(np.abs(X)))
571
+ >>> plt.show()
572
+ """
573
+ x = np.asarray(x)
574
+ transform = ZoomFFT(x.shape[axis], fn, m=m, fs=fs, endpoint=endpoint)
575
+ return transform(x, axis=axis)
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_filter_design.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py ADDED
@@ -0,0 +1,1286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Functions for FIR filter design."""
2
+
3
+ from math import ceil, log
4
+ import operator
5
+ import warnings
6
+ from typing import Literal
7
+
8
+ import numpy as np
9
+ from numpy.fft import irfft, fft, ifft
10
+ from scipy.special import sinc
11
+ from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning,
12
+ lstsq)
13
+ from scipy.signal._arraytools import _validate_fs
14
+
15
+ from . import _sigtools
16
+
17
+ __all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
18
+ 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase']
19
+
20
+
21
+ # Some notes on function parameters:
22
+ #
23
+ # `cutoff` and `width` are given as numbers between 0 and 1. These are
24
+ # relative frequencies, expressed as a fraction of the Nyquist frequency.
25
+ # For example, if the Nyquist frequency is 2 KHz, then width=0.15 is a width
26
+ # of 300 Hz.
27
+ #
28
+ # The `order` of a FIR filter is one less than the number of taps.
29
+ # This is a potential source of confusion, so in the following code,
30
+ # we will always use the number of taps as the parameterization of
31
+ # the 'size' of the filter. The "number of taps" means the number
32
+ # of coefficients, which is the same as the length of the impulse
33
+ # response of the filter.
34
+
35
+
36
+ def kaiser_beta(a):
37
+ """Compute the Kaiser parameter `beta`, given the attenuation `a`.
38
+
39
+ Parameters
40
+ ----------
41
+ a : float
42
+ The desired attenuation in the stopband and maximum ripple in
43
+ the passband, in dB. This should be a *positive* number.
44
+
45
+ Returns
46
+ -------
47
+ beta : float
48
+ The `beta` parameter to be used in the formula for a Kaiser window.
49
+
50
+ References
51
+ ----------
52
+ Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
53
+
54
+ Examples
55
+ --------
56
+ Suppose we want to design a lowpass filter, with 65 dB attenuation
57
+ in the stop band. The Kaiser window parameter to be used in the
58
+ window method is computed by ``kaiser_beta(65)``:
59
+
60
+ >>> from scipy.signal import kaiser_beta
61
+ >>> kaiser_beta(65)
62
+ 6.20426
63
+
64
+ """
65
+ if a > 50:
66
+ beta = 0.1102 * (a - 8.7)
67
+ elif a > 21:
68
+ beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
69
+ else:
70
+ beta = 0.0
71
+ return beta
72
+
73
+
74
+ def kaiser_atten(numtaps, width):
75
+ """Compute the attenuation of a Kaiser FIR filter.
76
+
77
+ Given the number of taps `N` and the transition width `width`, compute the
78
+ attenuation `a` in dB, given by Kaiser's formula:
79
+
80
+ a = 2.285 * (N - 1) * pi * width + 7.95
81
+
82
+ Parameters
83
+ ----------
84
+ numtaps : int
85
+ The number of taps in the FIR filter.
86
+ width : float
87
+ The desired width of the transition region between passband and
88
+ stopband (or, in general, at any discontinuity) for the filter,
89
+ expressed as a fraction of the Nyquist frequency.
90
+
91
+ Returns
92
+ -------
93
+ a : float
94
+ The attenuation of the ripple, in dB.
95
+
96
+ See Also
97
+ --------
98
+ kaiserord, kaiser_beta
99
+
100
+ Examples
101
+ --------
102
+ Suppose we want to design a FIR filter using the Kaiser window method
103
+ that will have 211 taps and a transition width of 9 Hz for a signal that
104
+ is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency,
105
+ the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB)
106
+ is computed as follows:
107
+
108
+ >>> from scipy.signal import kaiser_atten
109
+ >>> kaiser_atten(211, 0.0375)
110
+ 64.48099630593983
111
+
112
+ """
113
+ a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
114
+ return a
115
+
116
+
117
+ def kaiserord(ripple, width):
118
+ """
119
+ Determine the filter window parameters for the Kaiser window method.
120
+
121
+ The parameters returned by this function are generally used to create
122
+ a finite impulse response filter using the window method, with either
123
+ `firwin` or `firwin2`.
124
+
125
+ Parameters
126
+ ----------
127
+ ripple : float
128
+ Upper bound for the deviation (in dB) of the magnitude of the
129
+ filter's frequency response from that of the desired filter (not
130
+ including frequencies in any transition intervals). That is, if w
131
+ is the frequency expressed as a fraction of the Nyquist frequency,
132
+ A(w) is the actual frequency response of the filter and D(w) is the
133
+ desired frequency response, the design requirement is that::
134
+
135
+ abs(A(w) - D(w))) < 10**(-ripple/20)
136
+
137
+ for 0 <= w <= 1 and w not in a transition interval.
138
+ width : float
139
+ Width of transition region, normalized so that 1 corresponds to pi
140
+ radians / sample. That is, the frequency is expressed as a fraction
141
+ of the Nyquist frequency.
142
+
143
+ Returns
144
+ -------
145
+ numtaps : int
146
+ The length of the Kaiser window.
147
+ beta : float
148
+ The beta parameter for the Kaiser window.
149
+
150
+ See Also
151
+ --------
152
+ kaiser_beta, kaiser_atten
153
+
154
+ Notes
155
+ -----
156
+ There are several ways to obtain the Kaiser window:
157
+
158
+ - ``signal.windows.kaiser(numtaps, beta, sym=True)``
159
+ - ``signal.get_window(beta, numtaps)``
160
+ - ``signal.get_window(('kaiser', beta), numtaps)``
161
+
162
+ The empirical equations discovered by Kaiser are used.
163
+
164
+ References
165
+ ----------
166
+ Oppenheim, Schafer, "Discrete-Time Signal Processing", pp.475-476.
167
+
168
+ Examples
169
+ --------
170
+ We will use the Kaiser window method to design a lowpass FIR filter
171
+ for a signal that is sampled at 1000 Hz.
172
+
173
+ We want at least 65 dB rejection in the stop band, and in the pass
174
+ band the gain should vary no more than 0.5%.
175
+
176
+ We want a cutoff frequency of 175 Hz, with a transition between the
177
+ pass band and the stop band of 24 Hz. That is, in the band [0, 163],
178
+ the gain varies no more than 0.5%, and in the band [187, 500], the
179
+ signal is attenuated by at least 65 dB.
180
+
181
+ >>> import numpy as np
182
+ >>> from scipy.signal import kaiserord, firwin, freqz
183
+ >>> import matplotlib.pyplot as plt
184
+ >>> fs = 1000.0
185
+ >>> cutoff = 175
186
+ >>> width = 24
187
+
188
+ The Kaiser method accepts just a single parameter to control the pass
189
+ band ripple and the stop band rejection, so we use the more restrictive
190
+ of the two. In this case, the pass band ripple is 0.005, or 46.02 dB,
191
+ so we will use 65 dB as the design parameter.
192
+
193
+ Use `kaiserord` to determine the length of the filter and the
194
+ parameter for the Kaiser window.
195
+
196
+ >>> numtaps, beta = kaiserord(65, width/(0.5*fs))
197
+ >>> numtaps
198
+ 167
199
+ >>> beta
200
+ 6.20426
201
+
202
+ Use `firwin` to create the FIR filter.
203
+
204
+ >>> taps = firwin(numtaps, cutoff, window=('kaiser', beta),
205
+ ... scale=False, fs=fs)
206
+
207
+ Compute the frequency response of the filter. ``w`` is the array of
208
+ frequencies, and ``h`` is the corresponding complex array of frequency
209
+ responses.
210
+
211
+ >>> w, h = freqz(taps, worN=8000)
212
+ >>> w *= 0.5*fs/np.pi # Convert w to Hz.
213
+
214
+ Compute the deviation of the magnitude of the filter's response from
215
+ that of the ideal lowpass filter. Values in the transition region are
216
+ set to ``nan``, so they won't appear in the plot.
217
+
218
+ >>> ideal = w < cutoff # The "ideal" frequency response.
219
+ >>> deviation = np.abs(np.abs(h) - ideal)
220
+ >>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan
221
+
222
+ Plot the deviation. A close look at the left end of the stop band shows
223
+ that the requirement for 65 dB attenuation is violated in the first lobe
224
+ by about 0.125 dB. This is not unusual for the Kaiser window method.
225
+
226
+ >>> plt.plot(w, 20*np.log10(np.abs(deviation)))
227
+ >>> plt.xlim(0, 0.5*fs)
228
+ >>> plt.ylim(-90, -60)
229
+ >>> plt.grid(alpha=0.25)
230
+ >>> plt.axhline(-65, color='r', ls='--', alpha=0.3)
231
+ >>> plt.xlabel('Frequency (Hz)')
232
+ >>> plt.ylabel('Deviation from ideal (dB)')
233
+ >>> plt.title('Lowpass Filter Frequency Response')
234
+ >>> plt.show()
235
+
236
+ """
237
+ A = abs(ripple) # in case somebody is confused as to what's meant
238
+ if A < 8:
239
+ # Formula for N is not valid in this range.
240
+ raise ValueError("Requested maximum ripple attenuation "
241
+ f"{A:f} is too small for the Kaiser formula.")
242
+ beta = kaiser_beta(A)
243
+
244
+ # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
245
+ # order, so we have to add 1 to get the number of taps.
246
+ numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
247
+
248
+ return int(ceil(numtaps)), beta
249
+
250
+
251
+ def firwin(numtaps, cutoff, *, width=None, window='hamming', pass_zero=True,
252
+ scale=True, fs=None):
253
+ """
254
+ FIR filter design using the window method.
255
+
256
+ This function computes the coefficients of a finite impulse response
257
+ filter. The filter will have linear phase; it will be Type I if
258
+ `numtaps` is odd and Type II if `numtaps` is even.
259
+
260
+ Type II filters always have zero response at the Nyquist frequency, so a
261
+ ValueError exception is raised if firwin is called with `numtaps` even and
262
+ having a passband whose right end is at the Nyquist frequency.
263
+
264
+ Parameters
265
+ ----------
266
+ numtaps : int
267
+ Length of the filter (number of coefficients, i.e. the filter
268
+ order + 1). `numtaps` must be odd if a passband includes the
269
+ Nyquist frequency.
270
+ cutoff : float or 1-D array_like
271
+ Cutoff frequency of filter (expressed in the same units as `fs`)
272
+ OR an array of cutoff frequencies (that is, band edges). In the
273
+ former case, as a float, the cutoff frequency should correspond
274
+ with the half-amplitude point, where the attenuation will be -6dB.
275
+ In the latter case, the frequencies in `cutoff` should be positive
276
+ and monotonically increasing between 0 and `fs/2`. The values 0
277
+ and `fs/2` must not be included in `cutoff`. It should be noted
278
+ that this is different than the behavior of `scipy.signal.iirdesign`,
279
+ where the cutoff is the half-power point (-3dB).
280
+ width : float or None, optional
281
+ If `width` is not None, then assume it is the approximate width
282
+ of the transition region (expressed in the same units as `fs`)
283
+ for use in Kaiser FIR filter design. In this case, the `window`
284
+ argument is ignored.
285
+ window : string or tuple of string and parameter values, optional
286
+ Desired window to use. See `scipy.signal.get_window` for a list
287
+ of windows and required parameters.
288
+ pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
289
+ If True, the gain at the frequency 0 (i.e., the "DC gain") is 1.
290
+ If False, the DC gain is 0. Can also be a string argument for the
291
+ desired filter type (equivalent to ``btype`` in IIR design functions).
292
+
293
+ .. versionadded:: 1.3.0
294
+ Support for string arguments.
295
+ scale : bool, optional
296
+ Set to True to scale the coefficients so that the frequency
297
+ response is exactly unity at a certain frequency.
298
+ That frequency is either:
299
+
300
+ - 0 (DC) if the first passband starts at 0 (i.e. pass_zero
301
+ is True)
302
+ - `fs/2` (the Nyquist frequency) if the first passband ends at
303
+ `fs/2` (i.e the filter is a single band highpass filter);
304
+ center of first passband otherwise
305
+
306
+ fs : float, optional
307
+ The sampling frequency of the signal. Each frequency in `cutoff`
308
+ must be between 0 and ``fs/2``. Default is 2.
309
+
310
+ Returns
311
+ -------
312
+ h : (numtaps,) ndarray
313
+ Coefficients of length `numtaps` FIR filter.
314
+
315
+ Raises
316
+ ------
317
+ ValueError
318
+ If any value in `cutoff` is less than or equal to 0 or greater
319
+ than or equal to ``fs/2``, if the values in `cutoff` are not strictly
320
+ monotonically increasing, or if `numtaps` is even but a passband
321
+ includes the Nyquist frequency.
322
+
323
+ See Also
324
+ --------
325
+ firwin2
326
+ firls
327
+ minimum_phase
328
+ remez
329
+
330
+ Examples
331
+ --------
332
+ Low-pass from 0 to f:
333
+
334
+ >>> from scipy import signal
335
+ >>> numtaps = 3
336
+ >>> f = 0.1
337
+ >>> signal.firwin(numtaps, f)
338
+ array([ 0.06799017, 0.86401967, 0.06799017])
339
+
340
+ Use a specific window function:
341
+
342
+ >>> signal.firwin(numtaps, f, window='nuttall')
343
+ array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
344
+
345
+ High-pass ('stop' from 0 to f):
346
+
347
+ >>> signal.firwin(numtaps, f, pass_zero=False)
348
+ array([-0.00859313, 0.98281375, -0.00859313])
349
+
350
+ Band-pass:
351
+
352
+ >>> f1, f2 = 0.1, 0.2
353
+ >>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
354
+ array([ 0.06301614, 0.88770441, 0.06301614])
355
+
356
+ Band-stop:
357
+
358
+ >>> signal.firwin(numtaps, [f1, f2])
359
+ array([-0.00801395, 1.0160279 , -0.00801395])
360
+
361
+ Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
362
+
363
+ >>> f3, f4 = 0.3, 0.4
364
+ >>> signal.firwin(numtaps, [f1, f2, f3, f4])
365
+ array([-0.01376344, 1.02752689, -0.01376344])
366
+
367
+ Multi-band (passbands are [f1, f2] and [f3,f4]):
368
+
369
+ >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
370
+ array([ 0.04890915, 0.91284326, 0.04890915])
371
+
372
+ """
373
+ # The major enhancements to this function added in November 2010 were
374
+ # developed by Tom Krauss (see ticket #902).
375
+ fs = _validate_fs(fs, allow_none=True)
376
+ fs = 2 if fs is None else fs
377
+
378
+ nyq = 0.5 * fs
379
+
380
+ cutoff = np.atleast_1d(cutoff) / float(nyq)
381
+
382
+ # Check for invalid input.
383
+ if cutoff.ndim > 1:
384
+ raise ValueError("The cutoff argument must be at most "
385
+ "one-dimensional.")
386
+ if cutoff.size == 0:
387
+ raise ValueError("At least one cutoff frequency must be given.")
388
+ if cutoff.min() <= 0 or cutoff.max() >= 1:
389
+ raise ValueError("Invalid cutoff frequency: frequencies must be "
390
+ "greater than 0 and less than fs/2.")
391
+ if np.any(np.diff(cutoff) <= 0):
392
+ raise ValueError("Invalid cutoff frequencies: the frequencies "
393
+ "must be strictly increasing.")
394
+
395
+ if width is not None:
396
+ # A width was given. Find the beta parameter of the Kaiser window
397
+ # and set `window`. This overrides the value of `window` passed in.
398
+ atten = kaiser_atten(numtaps, float(width) / nyq)
399
+ beta = kaiser_beta(atten)
400
+ window = ('kaiser', beta)
401
+
402
+ if isinstance(pass_zero, str):
403
+ if pass_zero in ('bandstop', 'lowpass'):
404
+ if pass_zero == 'lowpass':
405
+ if cutoff.size != 1:
406
+ raise ValueError('cutoff must have one element if '
407
+ f'pass_zero=="lowpass", got {cutoff.shape}')
408
+ elif cutoff.size <= 1:
409
+ raise ValueError('cutoff must have at least two elements if '
410
+ f'pass_zero=="bandstop", got {cutoff.shape}')
411
+ pass_zero = True
412
+ elif pass_zero in ('bandpass', 'highpass'):
413
+ if pass_zero == 'highpass':
414
+ if cutoff.size != 1:
415
+ raise ValueError('cutoff must have one element if '
416
+ f'pass_zero=="highpass", got {cutoff.shape}')
417
+ elif cutoff.size <= 1:
418
+ raise ValueError('cutoff must have at least two elements if '
419
+ f'pass_zero=="bandpass", got {cutoff.shape}')
420
+ pass_zero = False
421
+ else:
422
+ raise ValueError('pass_zero must be True, False, "bandpass", '
423
+ '"lowpass", "highpass", or "bandstop", got '
424
+ f'{pass_zero}')
425
+ pass_zero = bool(operator.index(pass_zero)) # ensure bool-like
426
+
427
+ pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
428
+ if pass_nyquist and numtaps % 2 == 0:
429
+ raise ValueError("A filter with an even number of coefficients must "
430
+ "have zero response at the Nyquist frequency.")
431
+
432
+ # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
433
+ # is even, and each pair in cutoff corresponds to passband.
434
+ cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
435
+
436
+ # `bands` is a 2-D array; each row gives the left and right edges of
437
+ # a passband.
438
+ bands = cutoff.reshape(-1, 2)
439
+
440
+ # Build up the coefficients.
441
+ alpha = 0.5 * (numtaps - 1)
442
+ m = np.arange(0, numtaps) - alpha
443
+ h = 0
444
+ for left, right in bands:
445
+ h += right * sinc(right * m)
446
+ h -= left * sinc(left * m)
447
+
448
+ # Get and apply the window function.
449
+ from .windows import get_window
450
+ win = get_window(window, numtaps, fftbins=False)
451
+ h *= win
452
+
453
+ # Now handle scaling if desired.
454
+ if scale:
455
+ # Get the first passband.
456
+ left, right = bands[0]
457
+ if left == 0:
458
+ scale_frequency = 0.0
459
+ elif right == 1:
460
+ scale_frequency = 1.0
461
+ else:
462
+ scale_frequency = 0.5 * (left + right)
463
+ c = np.cos(np.pi * m * scale_frequency)
464
+ s = np.sum(h * c)
465
+ h /= s
466
+
467
+ return h
468
+
469
+
470
+ # Original version of firwin2 from scipy ticket #457, submitted by "tash".
471
+ #
472
+ # Rewritten by Warren Weckesser, 2010.
473
+ def firwin2(numtaps, freq, gain, *, nfreqs=None, window='hamming',
474
+ antisymmetric=False, fs=None):
475
+ """
476
+ FIR filter design using the window method.
477
+
478
+ From the given frequencies `freq` and corresponding gains `gain`,
479
+ this function constructs an FIR filter with linear phase and
480
+ (approximately) the given frequency response.
481
+
482
+ Parameters
483
+ ----------
484
+ numtaps : int
485
+ The number of taps in the FIR filter. `numtaps` must be less than
486
+ `nfreqs`.
487
+ freq : array_like, 1-D
488
+ The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
489
+ Nyquist. The Nyquist frequency is half `fs`.
490
+ The values in `freq` must be nondecreasing. A value can be repeated
491
+ once to implement a discontinuity. The first value in `freq` must
492
+ be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must
493
+ not be repeated.
494
+ gain : array_like
495
+ The filter gains at the frequency sampling points. Certain
496
+ constraints to gain values, depending on the filter type, are applied,
497
+ see Notes for details.
498
+ nfreqs : int, optional
499
+ The size of the interpolation mesh used to construct the filter.
500
+ For most efficient behavior, this should be a power of 2 plus 1
501
+ (e.g, 129, 257, etc). The default is one more than the smallest
502
+ power of 2 that is not less than `numtaps`. `nfreqs` must be greater
503
+ than `numtaps`.
504
+ window : string or (string, float) or float, or None, optional
505
+ Window function to use. Default is "hamming". See
506
+ `scipy.signal.get_window` for the complete list of possible values.
507
+ If None, no window function is applied.
508
+ antisymmetric : bool, optional
509
+ Whether resulting impulse response is symmetric/antisymmetric.
510
+ See Notes for more details.
511
+ fs : float, optional
512
+ The sampling frequency of the signal. Each frequency in `cutoff`
513
+ must be between 0 and ``fs/2``. Default is 2.
514
+
515
+ Returns
516
+ -------
517
+ taps : ndarray
518
+ The filter coefficients of the FIR filter, as a 1-D array of length
519
+ `numtaps`.
520
+
521
+ See Also
522
+ --------
523
+ firls
524
+ firwin
525
+ minimum_phase
526
+ remez
527
+
528
+ Notes
529
+ -----
530
+ From the given set of frequencies and gains, the desired response is
531
+ constructed in the frequency domain. The inverse FFT is applied to the
532
+ desired response to create the associated convolution kernel, and the
533
+ first `numtaps` coefficients of this kernel, scaled by `window`, are
534
+ returned.
535
+
536
+ The FIR filter will have linear phase. The type of filter is determined by
537
+ the value of 'numtaps` and `antisymmetric` flag.
538
+ There are four possible combinations:
539
+
540
+ - odd `numtaps`, `antisymmetric` is False, type I filter is produced
541
+ - even `numtaps`, `antisymmetric` is False, type II filter is produced
542
+ - odd `numtaps`, `antisymmetric` is True, type III filter is produced
543
+ - even `numtaps`, `antisymmetric` is True, type IV filter is produced
544
+
545
+ Magnitude response of all but type I filters are subjects to following
546
+ constraints:
547
+
548
+ - type II -- zero at the Nyquist frequency
549
+ - type III -- zero at zero and Nyquist frequencies
550
+ - type IV -- zero at zero frequency
551
+
552
+ .. versionadded:: 0.9.0
553
+
554
+ References
555
+ ----------
556
+ .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
557
+ Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
558
+ (See, for example, Section 7.4.)
559
+
560
+ .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
561
+ Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
562
+
563
+ Examples
564
+ --------
565
+ A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
566
+ that decreases linearly on [0.5, 1.0] from 1 to 0:
567
+
568
+ >>> from scipy import signal
569
+ >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
570
+ >>> print(taps[72:78])
571
+ [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
572
+
573
+ """
574
+ fs = _validate_fs(fs, allow_none=True)
575
+ fs = 2 if fs is None else fs
576
+ nyq = 0.5 * fs
577
+
578
+ if len(freq) != len(gain):
579
+ raise ValueError('freq and gain must be of same length.')
580
+
581
+ if nfreqs is not None and numtaps >= nfreqs:
582
+ raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
583
+ 'called with ntaps=%d and nfreqs=%s') %
584
+ (numtaps, nfreqs))
585
+
586
+ if freq[0] != 0 or freq[-1] != nyq:
587
+ raise ValueError('freq must start with 0 and end with fs/2.')
588
+ d = np.diff(freq)
589
+ if (d < 0).any():
590
+ raise ValueError('The values in freq must be nondecreasing.')
591
+ d2 = d[:-1] + d[1:]
592
+ if (d2 == 0).any():
593
+ raise ValueError('A value in freq must not occur more than twice.')
594
+ if freq[1] == 0:
595
+ raise ValueError('Value 0 must not be repeated in freq')
596
+ if freq[-2] == nyq:
597
+ raise ValueError('Value fs/2 must not be repeated in freq')
598
+
599
+ if antisymmetric:
600
+ if numtaps % 2 == 0:
601
+ ftype = 4
602
+ else:
603
+ ftype = 3
604
+ else:
605
+ if numtaps % 2 == 0:
606
+ ftype = 2
607
+ else:
608
+ ftype = 1
609
+
610
+ if ftype == 2 and gain[-1] != 0.0:
611
+ raise ValueError("A Type II filter must have zero gain at the "
612
+ "Nyquist frequency.")
613
+ elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
614
+ raise ValueError("A Type III filter must have zero gain at zero "
615
+ "and Nyquist frequencies.")
616
+ elif ftype == 4 and gain[0] != 0.0:
617
+ raise ValueError("A Type IV filter must have zero gain at zero "
618
+ "frequency.")
619
+
620
+ if nfreqs is None:
621
+ nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
622
+
623
+ if (d == 0).any():
624
+ # Tweak any repeated values in freq so that interp works.
625
+ freq = np.array(freq, copy=True)
626
+ eps = np.finfo(float).eps * nyq
627
+ for k in range(len(freq) - 1):
628
+ if freq[k] == freq[k + 1]:
629
+ freq[k] = freq[k] - eps
630
+ freq[k + 1] = freq[k + 1] + eps
631
+ # Check if freq is strictly increasing after tweak
632
+ d = np.diff(freq)
633
+ if (d <= 0).any():
634
+ raise ValueError("freq cannot contain numbers that are too close "
635
+ "(within eps * (fs/2): "
636
+ f"{eps}) to a repeated value")
637
+
638
+ # Linearly interpolate the desired response on a uniform mesh `x`.
639
+ x = np.linspace(0.0, nyq, nfreqs)
640
+ fx = np.interp(x, freq, gain)
641
+
642
+ # Adjust the phases of the coefficients so that the first `ntaps` of the
643
+ # inverse FFT are the desired filter coefficients.
644
+ shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
645
+ if ftype > 2:
646
+ shift *= 1j
647
+
648
+ fx2 = fx * shift
649
+
650
+ # Use irfft to compute the inverse FFT.
651
+ out_full = irfft(fx2)
652
+
653
+ if window is not None:
654
+ # Create the window to apply to the filter coefficients.
655
+ from .windows import get_window
656
+ wind = get_window(window, numtaps, fftbins=False)
657
+ else:
658
+ wind = 1
659
+
660
+ # Keep only the first `numtaps` coefficients in `out`, and multiply by
661
+ # the window.
662
+ out = out_full[:numtaps] * wind
663
+
664
+ if ftype == 3:
665
+ out[out.size // 2] = 0.0
666
+
667
+ return out
668
+
669
+
670
def remez(numtaps, bands, desired, *, weight=None, type='bandpass',
          maxiter=25, grid_density=16, fs=None):
    """Calculate the minimax optimal filter using the Remez exchange algorithm.

    Computes the coefficients of the FIR filter whose frequency response
    minimizes the maximum deviation from the desired gain over the given
    frequency bands.

    Parameters
    ----------
    numtaps : int
        Desired number of taps (the filter order plus one).
    bands : array_like
        Monotonic sequence of band edges; all non-negative and below
        half the sampling frequency `fs`.
    desired : array_like
        Desired gain in each band; half the length of `bands`.
    weight : array_like, optional
        Relative weighting per band; half the length of `bands`.
        Defaults to equal weighting.
    type : {'bandpass', 'differentiator', 'hilbert'}, optional
        'bandpass' (default): flat response in the bands.
        'differentiator': frequency-proportional response in the bands.
        'hilbert': odd-symmetric filter, i.e. type III (even order) or
        type IV (odd order) linear phase.
    maxiter : int, optional
        Maximum number of algorithm iterations. Default is 25.
    grid_density : int, optional
        The dense evaluation grid has size ``(numtaps + 1) * grid_density``.
        Default is 16.
    fs : float, optional
        Sampling frequency of the signal. Default is 1.

    Returns
    -------
    out : ndarray
        Rank-1 array of coefficients of the minimax-optimal filter.

    Raises
    ------
    ValueError
        If `type` is not one of the recognized filter types.

    See Also
    --------
    firls
    firwin
    firwin2
    minimum_phase

    References
    ----------
    .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
           design of optimum FIR linear phase digital filters",
           IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
    .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
           Program for Designing Optimum FIR Linear Phase Digital
           Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
           pp. 506-525, 1973.
    """
    fs = _validate_fs(fs, allow_none=True)
    if fs is None:
        fs = 1.0

    # Map the filter type name onto the numeric code used by _sigtools.
    type_codes = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}
    try:
        filter_code = type_codes[type]
    except KeyError as exc:
        raise ValueError("Type must be 'bandpass', 'differentiator', "
                         "or 'hilbert'") from exc

    # Default: weight every band equally.
    if weight is None:
        weight = [1] * len(desired)

    band_edges = np.asarray(bands).copy()
    return _sigtools._remez(numtaps, band_edges, desired, weight, filter_code,
                            fs, maxiter, grid_density)
839
+
840
+
841
def firls(numtaps, bands, desired, *, weight=None, fs=None):
    """FIR filter design using least-squares error minimization.

    Computes the coefficients of the Type I linear-phase FIR filter that
    minimizes the weighted, integrated squared error between the desired
    piecewise-linear frequency response (given by `bands` and `desired`)
    and the realized response.

    Parameters
    ----------
    numtaps : int
        Number of taps; must be odd.
    bands : array_like
        Monotonic nondecreasing band edges in Hz, given as frequency
        pairs: either a flat even-length sequence or an (n, 2) array.
        All edges must lie in ``[0, fs/2]``.
    desired : array_like
        Desired gain at the start and end point of each band; same size
        as `bands`.
    weight : array_like, optional
        Relative weight per band in the least-squares problem; half the
        size of `bands`. Defaults to equal weighting.
    fs : float, optional
        Sampling frequency of the signal. Default is 2, so `bands` is
        then expressed relative to the Nyquist frequency.

    Returns
    -------
    coeffs : ndarray
        Coefficients of the least-squares-optimal FIR filter.

    Raises
    ------
    ValueError
        If `numtaps` is even or < 1, if the bands are malformed,
        overlapping, or out of range, if `desired` has the wrong size or
        negative values, or if `weight` has the wrong size or negative
        values.

    See Also
    --------
    firwin, firwin2, minimum_phase, remez

    References
    ----------
    .. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares.
           OpenStax CNX. Aug 9, 2005.
           https://eeweb.engineering.nyu.edu/iselesni/EL713/firls/firls.pdf
    """
    fs = _validate_fs(fs, allow_none=True)
    if fs is None:
        fs = 2
    nyq = 0.5 * fs

    numtaps = int(numtaps)
    if numtaps % 2 == 0 or numtaps < 1:
        raise ValueError("numtaps must be odd and >= 1")
    half_order = (numtaps - 1) // 2

    # Normalize the band edges to [0, 1] (relative to Nyquist) and shape
    # them into frequency pairs, one row per band.
    nyq = float(nyq)
    if nyq <= 0:
        raise ValueError(f'nyq must be positive, got {nyq} <= 0.')
    bands = np.asarray(bands).flatten() / nyq
    if len(bands) % 2 != 0:
        raise ValueError("bands must contain frequency pairs.")
    if (bands < 0).any() or (bands > 1).any():
        raise ValueError("bands must be between 0 and 1 relative to Nyquist")
    bands.shape = (-1, 2)

    # Validate the desired gains and the per-band weights.
    desired = np.asarray(desired).flatten()
    if bands.size != desired.size:
        raise ValueError(
            f"desired must have one entry per frequency, got {desired.size} "
            f"gains for {bands.size} frequencies."
        )
    desired.shape = (-1, 2)
    if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():
        raise ValueError("bands must be monotonically nondecreasing and have "
                         "width > 0.")
    if (bands[:-1, 1] > bands[1:, 0]).any():
        raise ValueError("bands must not overlap.")
    if (desired < 0).any():
        raise ValueError("desired must be non-negative.")
    if weight is None:
        weight = np.ones(len(desired))
    weight = np.asarray(weight).flatten()
    if len(weight) != len(desired):
        raise ValueError("weight must be the same size as the number of "
                         f"band pairs ({len(bands)}).")
    if (weight < 0).any():
        raise ValueError("weight must be non-negative.")

    # Set up the linear system Qa = b, where Q = 0.5*(Toeplitz + Hankel)
    # built from q(n) = 1/pi * integral W(w) cos(n w) dw.  With constant
    # weight per band and the normalization w = pi*f this reduces to
    # W * f * sinc(n f) evaluated between the band edges.  (Both the
    # factor 0.5 and the 1/pi cancel or are applied later.)
    taps_idx = np.arange(numtaps)[:, np.newaxis, np.newaxis]
    q = np.dot(np.diff(np.sinc(bands * taps_idx) * bands, axis=2)[:, :, 0],
               weight)

    # Sum of the Toeplitz part q(k-n) and the Hankel part q(k+n).
    Q = (toeplitz(q[:half_order + 1]) +
         hankel(q[:half_order + 1], q[half_order:]))

    # Right-hand side b(n) = 1/pi * integral W(w) D(w) cos(n w) dw with a
    # linear desired response D over each band:
    #   b(n) = W * [f(mf+c) sinc(n f) + m f^2 cos(n pi f)/(pi n f)^2]
    # evaluated between the band edges (m, c chosen to hit the start and
    # end gains of each band).
    taps_idx = taps_idx[:half_order + 1]  # only this many terms needed
    slope = np.diff(desired, axis=1) / np.diff(bands, axis=1)
    intercept = desired[:, [0]] - bands[:, [0]] * slope
    b = bands * (slope * bands + intercept) * np.sinc(bands * taps_idx)
    # n == 0 term of cos(n pi f)/(pi n f)^2 via L'Hospital's rule.
    b[0] -= slope * bands * bands / 2.
    b[1:] += (slope * np.cos(taps_idx[1:] * np.pi * bands) /
              (np.pi * taps_idx[1:]) ** 2)
    b = np.dot(np.diff(b, axis=2)[:, :, 0], weight)

    # Solve the symmetric positive-definite system; fall back to a
    # least-squares solve when Q turns out to be (nearly) singular.
    try:
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            a = solve(Q, b, assume_a="pos", check_finite=False)
        for ww in caught:
            if (ww.category == LinAlgWarning and
                    str(ww.message).startswith('Ill-conditioned matrix')):
                raise LinAlgError(str(ww.message))
    except LinAlgError:  # Q is rank deficient
        # Faster than pinvh even without exploiting symmetry; gelsy beat
        # gelsd/gelss in non-exhaustive tests.
        a = lstsq(Q, b, lapack_driver='gelsy')[0]

    # Mirror the half-filter to obtain the symmetric (linear-phase) taps.
    return np.hstack((a[:0:-1], 2 * a[0], a[1:]))
1055
+
1056
+
1057
+ def _dhtm(mag):
1058
+ """Compute the modified 1-D discrete Hilbert transform
1059
+
1060
+ Parameters
1061
+ ----------
1062
+ mag : ndarray
1063
+ The magnitude spectrum. Should be 1-D with an even length, and
1064
+ preferably a fast length for FFT/IFFT.
1065
+ """
1066
+ # Adapted based on code by Niranjan Damera-Venkata,
1067
+ # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`)
1068
+ sig = np.zeros(len(mag))
1069
+ # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5
1070
+ midpt = len(mag) // 2
1071
+ sig[1:midpt] = 1
1072
+ sig[midpt+1:] = -1
1073
+ # eventually if we want to support complex filters, we will need a
1074
+ # np.abs() on the mag inside the log, and should remove the .real
1075
+ recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real
1076
+ return recon
1077
+
1078
+
1079
+ def minimum_phase(h: np.ndarray,
1080
+ method: Literal['homomorphic', 'hilbert'] = 'homomorphic',
1081
+ n_fft: int | None = None, *, half: bool = True) -> np.ndarray:
1082
+ """Convert a linear-phase FIR filter to minimum phase
1083
+
1084
+ Parameters
1085
+ ----------
1086
+ h : array
1087
+ Linear-phase FIR filter coefficients.
1088
+ method : {'hilbert', 'homomorphic'}
1089
+ The provided methods are:
1090
+
1091
+ 'homomorphic' (default)
1092
+ This method [4]_ [5]_ works best with filters with an
1093
+ odd number of taps, and the resulting minimum phase filter
1094
+ will have a magnitude response that approximates the square
1095
+ root of the original filter's magnitude response using half
1096
+ the number of taps when ``half=True`` (default), or the
1097
+ original magnitude spectrum using the same number of taps
1098
+ when ``half=False``.
1099
+
1100
+ 'hilbert'
1101
+ This method [1]_ is designed to be used with equiripple
1102
+ filters (e.g., from `remez`) with unity or zero gain
1103
+ regions.
1104
+
1105
+ n_fft : int
1106
+ The number of points to use for the FFT. Should be at least a
1107
+ few times larger than the signal length (see Notes).
1108
+ half : bool
1109
+ If ``True``, create a filter that is half the length of the original, with a
1110
+ magnitude spectrum that is the square root of the original. If ``False``,
1111
+ create a filter that is the same length as the original, with a magnitude
1112
+ spectrum that is designed to match the original (only supported when
1113
+ ``method='homomorphic'``).
1114
+
1115
+ .. versionadded:: 1.14.0
1116
+
1117
+ Returns
1118
+ -------
1119
+ h_minimum : array
1120
+ The minimum-phase version of the filter, with length
1121
+ ``(len(h) + 1) // 2`` when ``half is True`` or ``len(h)`` otherwise.
1122
+
1123
+ See Also
1124
+ --------
1125
+ firwin
1126
+ firwin2
1127
+ remez
1128
+
1129
+ Notes
1130
+ -----
1131
+ Both the Hilbert [1]_ or homomorphic [4]_ [5]_ methods require selection
1132
+ of an FFT length to estimate the complex cepstrum of the filter.
1133
+
1134
+ In the case of the Hilbert method, the deviation from the ideal
1135
+ spectrum ``epsilon`` is related to the number of stopband zeros
1136
+ ``n_stop`` and FFT length ``n_fft`` as::
1137
+
1138
+ epsilon = 2. * n_stop / n_fft
1139
+
1140
+ For example, with 100 stopband zeros and a FFT length of 2048,
1141
+ ``epsilon = 0.0976``. If we conservatively assume that the number of
1142
+ stopband zeros is one less than the filter length, we can take the FFT
1143
+ length to be the next power of 2 that satisfies ``epsilon=0.01`` as::
1144
+
1145
+ n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
1146
+
1147
+ This gives reasonable results for both the Hilbert and homomorphic
1148
+ methods, and gives the value used when ``n_fft=None``.
1149
+
1150
+ Alternative implementations exist for creating minimum-phase filters,
1151
+ including zero inversion [2]_ and spectral factorization [3]_ [4]_.
1152
+ For more information, see `this DSPGuru page
1153
+ <http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters>`__.
1154
+
1155
+ References
1156
+ ----------
1157
+ .. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and
1158
+ complex minimum phase digital FIR filters," Acoustics, Speech,
1159
+ and Signal Processing, 1999. Proceedings., 1999 IEEE International
1160
+ Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3.
1161
+ :doi:`10.1109/ICASSP.1999.756179`
1162
+ .. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR
1163
+ filters by direct factorization," Signal Processing,
1164
+ vol. 10, no. 4, pp. 369-383, Jun. 1986.
1165
+ .. [3] T. Saramaki, "Finite Impulse Response Filter Design," in
1166
+ Handbook for Digital Signal Processing, chapter 4,
1167
+ New York: Wiley-Interscience, 1993.
1168
+ .. [4] J. S. Lim, Advanced Topics in Signal Processing.
1169
+ Englewood Cliffs, N.J.: Prentice Hall, 1988.
1170
+ .. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck,
1171
+ "Discrete-Time Signal Processing," 3rd edition.
1172
+ Upper Saddle River, N.J.: Pearson, 2009.
1173
+
1174
+ Examples
1175
+ --------
1176
+ Create an optimal linear-phase low-pass filter `h` with a transition band of
1177
+ [0.2, 0.3] (assuming a Nyquist frequency of 1):
1178
+
1179
+ >>> import numpy as np
1180
+ >>> from scipy.signal import remez, minimum_phase, freqz, group_delay
1181
+ >>> import matplotlib.pyplot as plt
1182
+ >>> freq = [0, 0.2, 0.3, 1.0]
1183
+ >>> desired = [1, 0]
1184
+ >>> h_linear = remez(151, freq, desired, fs=2)
1185
+
1186
+ Convert it to minimum phase:
1187
+
1188
+ >>> h_hil = minimum_phase(h_linear, method='hilbert')
1189
+ >>> h_hom = minimum_phase(h_linear, method='homomorphic')
1190
+ >>> h_hom_full = minimum_phase(h_linear, method='homomorphic', half=False)
1191
+
1192
+ Compare the impulse and frequency response of the four filters:
1193
+
1194
+ >>> fig0, ax0 = plt.subplots(figsize=(6, 3), tight_layout=True)
1195
+ >>> fig1, axs = plt.subplots(3, sharex='all', figsize=(6, 6), tight_layout=True)
1196
+ >>> ax0.set_title("Impulse response")
1197
+ >>> ax0.set(xlabel='Samples', ylabel='Amplitude', xlim=(0, len(h_linear) - 1))
1198
+ >>> axs[0].set_title("Frequency Response")
1199
+ >>> axs[0].set(xlim=(0, .65), ylabel="Magnitude / dB")
1200
+ >>> axs[1].set(ylabel="Phase / rad")
1201
+ >>> axs[2].set(ylabel="Group Delay / samples", ylim=(-31, 81),
1202
+ ... xlabel='Normalized Frequency (Nyqist frequency: 1)')
1203
+ >>> for h, lb in ((h_linear, f'Linear ({len(h_linear)})'),
1204
+ ... (h_hil, f'Min-Hilbert ({len(h_hil)})'),
1205
+ ... (h_hom, f'Min-Homomorphic ({len(h_hom)})'),
1206
+ ... (h_hom_full, f'Min-Homom. Full ({len(h_hom_full)})')):
1207
+ ... w_H, H = freqz(h, fs=2)
1208
+ ... w_gd, gd = group_delay((h, 1), fs=2)
1209
+ ...
1210
+ ... alpha = 1.0 if lb == 'linear' else 0.5 # full opacity for 'linear' line
1211
+ ... ax0.plot(h, '.-', alpha=alpha, label=lb)
1212
+ ... axs[0].plot(w_H, 20 * np.log10(np.abs(H)), alpha=alpha)
1213
+ ... axs[1].plot(w_H, np.unwrap(np.angle(H)), alpha=alpha, label=lb)
1214
+ ... axs[2].plot(w_gd, gd, alpha=alpha)
1215
+ >>> ax0.grid(True)
1216
+ >>> ax0.legend(title='Filter Phase (Order)')
1217
+ >>> axs[1].legend(title='Filter Phase (Order)', loc='lower right')
1218
+ >>> for ax_ in axs: # shade transition band:
1219
+ ... ax_.axvspan(freq[1], freq[2], color='y', alpha=.25)
1220
+ ... ax_.grid(True)
1221
+ >>> plt.show()
1222
+
1223
+ The impulse response and group delay plot depict the 75 sample delay of the linear
1224
+ phase filter `h`. The phase should also be linear in the stop band--due to the small
1225
+ magnitude, numeric noise dominates there. Furthermore, the plots show that the
1226
+ minimum phase filters clearly show a reduced (negative) phase slope in the pass and
1227
+ transition band. The plots also illustrate that the filter with parameters
1228
+ ``method='homomorphic', half=False`` has same order and magnitude response as the
1229
+ linear filter `h` whereas the other minimum phase filters have only half the order
1230
+ and the square root of the magnitude response.
1231
+ """
1232
+ h = np.asarray(h)
1233
+ if np.iscomplexobj(h):
1234
+ raise ValueError('Complex filters not supported')
1235
+ if h.ndim != 1 or h.size <= 2:
1236
+ raise ValueError('h must be 1-D and at least 2 samples long')
1237
+ n_half = len(h) // 2
1238
+ if not np.allclose(h[-n_half:][::-1], h[:n_half]):
1239
+ warnings.warn('h does not appear to by symmetric, conversion may fail',
1240
+ RuntimeWarning, stacklevel=2)
1241
+ if not isinstance(method, str) or method not in \
1242
+ ('homomorphic', 'hilbert',):
1243
+ raise ValueError(f'method must be "homomorphic" or "hilbert", got {method!r}')
1244
+ if method == "hilbert" and not half:
1245
+ raise ValueError("`half=False` is only supported when `method='homomorphic'`")
1246
+ if n_fft is None:
1247
+ n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
1248
+ n_fft = int(n_fft)
1249
+ if n_fft < len(h):
1250
+ raise ValueError(f'n_fft must be at least len(h)=={len(h)}')
1251
+ if method == 'hilbert':
1252
+ w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half)
1253
+ H = np.real(fft(h, n_fft) * np.exp(1j * w))
1254
+ dp = max(H) - 1
1255
+ ds = 0 - min(H)
1256
+ S = 4. / (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2
1257
+ H += ds
1258
+ H *= S
1259
+ H = np.sqrt(H, out=H)
1260
+ H += 1e-10 # ensure that the log does not explode
1261
+ h_minimum = _dhtm(H)
1262
+ else: # method == 'homomorphic'
1263
+ # zero-pad; calculate the DFT
1264
+ h_temp = np.abs(fft(h, n_fft))
1265
+ # take 0.25*log(|H|**2) = 0.5*log(|H|)
1266
+ h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up
1267
+ np.log(h_temp, out=h_temp)
1268
+ if half: # halving of magnitude spectrum optional
1269
+ h_temp *= 0.5
1270
+ # IDFT
1271
+ h_temp = ifft(h_temp).real
1272
+ # multiply pointwise by the homomorphic filter
1273
+ # lmin[n] = 2u[n] - d[n]
1274
+ # i.e., double the positive frequencies and zero out the negative ones;
1275
+ # Oppenheim+Shafer 3rd ed p991 eq13.42b and p1004 fig13.7
1276
+ win = np.zeros(n_fft)
1277
+ win[0] = 1
1278
+ stop = n_fft // 2
1279
+ win[1:stop] = 2
1280
+ if n_fft % 2:
1281
+ win[stop] = 1
1282
+ h_temp *= win
1283
+ h_temp = ifft(np.exp(fft(h_temp)))
1284
+ h_minimum = h_temp.real
1285
+ n_out = (n_half + len(h) % 2) if half else len(h)
1286
+ return h_minimum[:n_out]
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_lti_conversion.py ADDED
@@ -0,0 +1,533 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ltisys -- a collection of functions to convert linear time invariant systems
3
+ from one representation to another.
4
+ """
5
+
6
+ import numpy as np
7
+ from numpy import (r_, eye, atleast_2d, poly, dot,
8
+ asarray, zeros, array, outer)
9
+ from scipy import linalg
10
+
11
+ from ._filter_design import tf2zpk, zpk2tf, normalize
12
+
13
+
14
+ __all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk',
15
+ 'cont2discrete']
16
+
17
+
18
def tf2ss(num, den):
    r"""Transfer function to state-space representation.

    Builds the controller-canonical-form state matrices of the system
    described by numerator/denominator polynomial coefficients.

    Parameters
    ----------
    num, den : array_like
        Coefficients of the numerator and denominator polynomials, in
        order of descending degree. The denominator must be at least as
        long as the numerator.

    Returns
    -------
    A, B, C, D : ndarray
        State-space representation of the system, in controller
        canonical form.

    Raises
    ------
    ValueError
        If the transfer function is improper (`num` longer than `den`).

    Examples
    --------
    >>> from scipy.signal import tf2ss
    >>> A, B, C, D = tf2ss([1, 3, 3], [1, 2, 1])
    >>> A
    array([[-2., -1.],
           [ 1.,  0.]])
    """
    # The states follow from asserting X(s) = U(s) / D(s), after which
    # Y(s) = N(s) * X(s); A, B, C, D then follow naturally.
    num, den = normalize(num, den)  # strips zeros, checks arrays
    if num.ndim == 1:
        num = asarray([num], num.dtype)
    num_cols = num.shape[1]
    den_len = len(den)
    if num_cols > den_len:
        raise ValueError(
            "Improper transfer function. `num` is longer than `den`.")
    if num_cols == 0 or den_len == 0:
        # Null system: every matrix is empty.
        return (array([], float), array([], float), array([], float),
                array([], float))

    # Left-pad the numerator so it has as many columns as the denominator.
    pad = zeros((num.shape[0], den_len - num_cols), dtype=num.dtype)
    num = np.hstack((pad, num))

    if num.shape[-1] > 0:
        D = atleast_2d(num[:, 0])
    else:
        # Not a null system -- it merely has a zero D matrix; keep a
        # non-empty shape so downstream functions (e.g. ss2tf) can
        # operate on it.
        D = array([[0]], float)

    if den_len == 1:
        # Zeroth-order denominator: the gain lives entirely in D.
        D = D.reshape(num.shape)
        return (zeros((1, 1)), zeros((1, D.shape[1])),
                zeros((D.shape[0], 1)), D)

    A = r_[-array([den[1:]]), eye(den_len - 2, den_len - 1)]
    B = eye(den_len - 1, 1)
    C = num[:, 1:] - outer(num[:, 0], den[1:])
    D = D.reshape((C.shape[0], B.shape[1]))
    return A, B, C, D
113
+
114
+
115
+ def _none_to_empty_2d(arg):
116
+ if arg is None:
117
+ return zeros((0, 0))
118
+ else:
119
+ return arg
120
+
121
+
122
+ def _atleast_2d_or_none(arg):
123
+ if arg is not None:
124
+ return atleast_2d(arg)
125
+
126
+
127
+ def _shape_or_none(M):
128
+ if M is not None:
129
+ return M.shape
130
+ else:
131
+ return (None,) * 2
132
+
133
+
134
+ def _choice_not_none(*args):
135
+ for arg in args:
136
+ if arg is not None:
137
+ return arg
138
+
139
+
140
+ def _restore(M, shape):
141
+ if M.shape == (0, 0):
142
+ return zeros(shape)
143
+ else:
144
+ if M.shape != shape:
145
+ raise ValueError("The input arrays have incompatible shapes.")
146
+ return M
147
+
148
+
149
def abcd_normalize(A=None, B=None, C=None, D=None):
    """Check state-space matrices and ensure they are 2-D.

    Missing matrices are reconstructed, when possible, from the shapes of
    the matrices that were supplied, guaranteeing consistent row/column
    counts.

    Parameters
    ----------
    A, B, C, D : array_like, optional
        State-space matrices (all ``None`` by default).  See `ss2tf` for
        the expected format.

    Returns
    -------
    A, B, C, D : array
        Properly shaped state-space matrices.

    Raises
    ------
    ValueError
        If not enough information on the system was provided.
    """
    A, B, C, D = (_atleast_2d_or_none(M) for M in (A, B, C, D))

    MA, NA = _shape_or_none(A)
    MB, NB = _shape_or_none(B)
    MC, NC = _shape_or_none(C)
    MD, ND = _shape_or_none(D)

    # p states, q inputs, r outputs -- each deducible from several matrices.
    p = _choice_not_none(MA, MB, NC)
    q = _choice_not_none(NB, ND)
    r = _choice_not_none(MC, MD)
    if p is None or q is None or r is None:
        raise ValueError("Not enough information on the system.")

    A, B, C, D = (_none_to_empty_2d(M) for M in (A, B, C, D))
    A = _restore(A, (p, p))
    B = _restore(B, (p, q))
    C = _restore(C, (r, p))
    D = _restore(D, (r, q))
    return A, B, C, D
194
+
195
+
196
def ss2tf(A, B, C, D, input=0):
    r"""State-space to transfer function.

    A, B, C, D defines a linear state-space system with `p` inputs,
    `q` outputs, and `n` state variables.

    Parameters
    ----------
    A : array_like
        State (or system) matrix of shape ``(n, n)``
    B : array_like
        Input matrix of shape ``(n, p)``
    C : array_like
        Output matrix of shape ``(q, n)``
    D : array_like
        Feedthrough (or feedforward) matrix of shape ``(q, p)``
    input : int, optional
        For multiple-input systems, the index of the input to use.

    Returns
    -------
    num : 2-D ndarray
        Numerator(s) of the resulting transfer function(s), one row per
        system output.
    den : 1-D ndarray
        Denominator polynomial, shared by all outputs.

    Examples
    --------
    >>> from scipy.signal import ss2tf
    >>> ss2tf([[-2, -1], [1, 0]], [[1], [0]], [[1, 2]], 1)
    (array([[1., 3., 3.]]), array([ 1.,  2.,  1.]))
    """
    # The transfer function is C (sI - A)^-1 B + D.
    A, B, C, D = abcd_normalize(A, B, C, D)

    nout, nin = D.shape
    if input >= nin:
        raise ValueError("System does not have the input specified.")

    # Reduce a possibly-MIMO system to single-input by keeping one column.
    B = B[:, input:input + 1]
    D = D[:, input:input + 1]

    try:
        den = poly(A)
    except ValueError:
        den = 1

    if (B.size == 0) and (C.size == 0):
        num = np.ravel(D)
        if (D.size == 0) and (A.size == 0):
            den = []
        return num, den

    num_states = A.shape[0]
    # Combine one slice of each matrix so the result dtype follows numpy's
    # usual promotion rules (e.g. complex if any input is complex).
    type_test = A[:, 0] + B[:, 0] + C[0, :] + D + 0.0
    num = np.empty((nout, num_states + 1), type_test.dtype)
    for row in range(nout):
        Crow = atleast_2d(C[row, :])
        # char. poly of (A - B*Crow) plus a feedthrough correction term.
        num[row] = poly(A - dot(B, Crow)) + (D[row] - 1) * den

    return num, den
283
+
284
+
285
def zpk2ss(z, p, k):
    """Zero-pole-gain representation to state-space representation.

    Parameters
    ----------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.

    Returns
    -------
    A, B, C, D : ndarray
        State-space matrices of the system, in controller canonical form.
    """
    # Route through the transfer-function form.
    num, den = zpk2tf(z, p, k)
    return tf2ss(num, den)
303
+
304
+
305
def ss2zpk(A, B, C, D, input=0):
    """State-space representation to zero-pole-gain representation.

    A, B, C, D defines a linear state-space system with `p` inputs,
    `q` outputs, and `n` state variables.

    Parameters
    ----------
    A : array_like
        State (or system) matrix of shape ``(n, n)``
    B : array_like
        Input matrix of shape ``(n, p)``
    C : array_like
        Output matrix of shape ``(q, n)``
    D : array_like
        Feedthrough (or feedforward) matrix of shape ``(q, p)``
    input : int, optional
        For multiple-input systems, the index of the input to use.

    Returns
    -------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.
    """
    # Route through the transfer-function form.
    num, den = ss2tf(A, B, C, D, input=input)
    return tf2zpk(num, den)
333
+
334
+
335
+ def cont2discrete(system, dt, method="zoh", alpha=None):
336
+ """
337
+ Transform a continuous to a discrete state-space system.
338
+
339
+ Parameters
340
+ ----------
341
+ system : a tuple describing the system or an instance of `lti`
342
+ The following gives the number of elements in the tuple and
343
+ the interpretation:
344
+
345
+ * 1: (instance of `lti`)
346
+ * 2: (num, den)
347
+ * 3: (zeros, poles, gain)
348
+ * 4: (A, B, C, D)
349
+
350
+ dt : float
351
+ The discretization time step.
352
+ method : str, optional
353
+ Which method to use:
354
+
355
+ * gbt: generalized bilinear transformation
356
+ * bilinear: Tustin's approximation ("gbt" with alpha=0.5)
357
+ * euler: Euler (or forward differencing) method ("gbt" with alpha=0)
358
+ * backward_diff: Backwards differencing ("gbt" with alpha=1.0)
359
+ * zoh: zero-order hold (default)
360
+ * foh: first-order hold (*versionadded: 1.3.0*)
361
+ * impulse: equivalent impulse response (*versionadded: 1.3.0*)
362
+
363
+ alpha : float within [0, 1], optional
364
+ The generalized bilinear transformation weighting parameter, which
365
+ should only be specified with method="gbt", and is ignored otherwise
366
+
367
+ Returns
368
+ -------
369
+ sysd : tuple containing the discrete system
370
+ Based on the input type, the output will be of the form
371
+
372
+ * (num, den, dt) for transfer function input
373
+ * (zeros, poles, gain, dt) for zeros-poles-gain input
374
+ * (A, B, C, D, dt) for state-space system input
375
+
376
+ Notes
377
+ -----
378
+ By default, the routine uses a Zero-Order Hold (zoh) method to perform
379
+ the transformation. Alternatively, a generalized bilinear transformation
380
+ may be used, which includes the common Tustin's bilinear approximation,
381
+ an Euler's method technique, or a backwards differencing technique.
382
+
383
+ The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear
384
+ approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method
385
+ is based on [4]_.
386
+
387
+ References
388
+ ----------
389
+ .. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models
390
+
391
+ .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf
392
+
393
+ .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized
394
+ bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754,
395
+ 2009.
396
+ (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf)
397
+
398
+ .. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control
399
+ of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley,
400
+ pp. 204-206, 1998.
401
+
402
+ Examples
403
+ --------
404
+ We can transform a continuous state-space system to a discrete one:
405
+
406
+ >>> import numpy as np
407
+ >>> import matplotlib.pyplot as plt
408
+ >>> from scipy.signal import cont2discrete, lti, dlti, dstep
409
+
410
+ Define a continuous state-space system.
411
+
412
+ >>> A = np.array([[0, 1],[-10., -3]])
413
+ >>> B = np.array([[0],[10.]])
414
+ >>> C = np.array([[1., 0]])
415
+ >>> D = np.array([[0.]])
416
+ >>> l_system = lti(A, B, C, D)
417
+ >>> t, x = l_system.step(T=np.linspace(0, 5, 100))
418
+ >>> fig, ax = plt.subplots()
419
+ >>> ax.plot(t, x, label='Continuous', linewidth=3)
420
+
421
+ Transform it to a discrete state-space system using several methods.
422
+
423
+ >>> dt = 0.1
424
+ >>> for method in ['zoh', 'bilinear', 'euler', 'backward_diff', 'foh', 'impulse']:
425
+ ... d_system = cont2discrete((A, B, C, D), dt, method=method)
426
+ ... s, x_d = dstep(d_system)
427
+ ... ax.step(s, np.squeeze(x_d), label=method, where='post')
428
+ >>> ax.axis([t[0], t[-1], x[0], 1.4])
429
+ >>> ax.legend(loc='best')
430
+ >>> fig.tight_layout()
431
+ >>> plt.show()
432
+
433
+ """
434
+ if len(system) == 1:
435
+ return system.to_discrete()
436
+ if len(system) == 2:
437
+ sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method,
438
+ alpha=alpha)
439
+ return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
440
+ elif len(system) == 3:
441
+ sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt,
442
+ method=method, alpha=alpha)
443
+ return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
444
+ elif len(system) == 4:
445
+ a, b, c, d = system
446
+ else:
447
+ raise ValueError("First argument must either be a tuple of 2 (tf), "
448
+ "3 (zpk), or 4 (ss) arrays.")
449
+
450
+ if method == 'gbt':
451
+ if alpha is None:
452
+ raise ValueError("Alpha parameter must be specified for the "
453
+ "generalized bilinear transform (gbt) method")
454
+ elif alpha < 0 or alpha > 1:
455
+ raise ValueError("Alpha parameter must be within the interval "
456
+ "[0,1] for the gbt method")
457
+
458
+ if method == 'gbt':
459
+ # This parameter is used repeatedly - compute once here
460
+ ima = np.eye(a.shape[0]) - alpha*dt*a
461
+ ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a)
462
+ bd = linalg.solve(ima, dt*b)
463
+
464
+ # Similarly solve for the output equation matrices
465
+ cd = linalg.solve(ima.transpose(), c.transpose())
466
+ cd = cd.transpose()
467
+ dd = d + alpha*np.dot(c, bd)
468
+
469
+ elif method == 'bilinear' or method == 'tustin':
470
+ return cont2discrete(system, dt, method="gbt", alpha=0.5)
471
+
472
+ elif method == 'euler' or method == 'forward_diff':
473
+ return cont2discrete(system, dt, method="gbt", alpha=0.0)
474
+
475
+ elif method == 'backward_diff':
476
+ return cont2discrete(system, dt, method="gbt", alpha=1.0)
477
+
478
+ elif method == 'zoh':
479
+ # Build an exponential matrix
480
+ em_upper = np.hstack((a, b))
481
+
482
+ # Need to stack zeros under the a and b matrices
483
+ em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])),
484
+ np.zeros((b.shape[1], b.shape[1]))))
485
+
486
+ em = np.vstack((em_upper, em_lower))
487
+ ms = linalg.expm(dt * em)
488
+
489
+ # Dispose of the lower rows
490
+ ms = ms[:a.shape[0], :]
491
+
492
+ ad = ms[:, 0:a.shape[1]]
493
+ bd = ms[:, a.shape[1]:]
494
+
495
+ cd = c
496
+ dd = d
497
+
498
+ elif method == 'foh':
499
+ # Size parameters for convenience
500
+ n = a.shape[0]
501
+ m = b.shape[1]
502
+
503
+ # Build an exponential matrix similar to 'zoh' method
504
+ em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m))
505
+ em_lower = zeros((m, n + 2 * m))
506
+ em = np.block([[em_upper], [em_lower]])
507
+
508
+ ms = linalg.expm(em)
509
+
510
+ # Get the three blocks from upper rows
511
+ ms11 = ms[:n, 0:n]
512
+ ms12 = ms[:n, n:n + m]
513
+ ms13 = ms[:n, n + m:]
514
+
515
+ ad = ms11
516
+ bd = ms12 - ms13 + ms11 @ ms13
517
+ cd = c
518
+ dd = d + c @ ms13
519
+
520
+ elif method == 'impulse':
521
+ if not np.allclose(d, 0):
522
+ raise ValueError("Impulse method is only applicable "
523
+ "to strictly proper systems")
524
+
525
+ ad = linalg.expm(a * dt)
526
+ bd = ad @ b * dt
527
+ cd = c
528
+ dd = c @ b * dt
529
+
530
+ else:
531
+ raise ValueError(f"Unknown transformation method '{method}'")
532
+
533
+ return ad, bd, cd, dd, dt
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_ltisys.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Eric Larson
2
+ # 2014
3
+
4
+ """Tools for MLS generation"""
5
+
6
+ import numpy as np
7
+
8
+ from ._max_len_seq_inner import _max_len_seq_inner
9
+
10
+ __all__ = ['max_len_seq']
11
+
12
+
13
# Linear-feedback shift register taps for each register length (nbits),
# used by max_len_seq() when the caller does not supply taps.
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
             9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
             14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
             18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
             23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
             27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
             31: [28], 32: [31, 30, 10]}

def max_len_seq(nbits, state=None, length=None, taps=None):
    """
    Maximum length sequence (MLS) generator.

    Parameters
    ----------
    nbits : int
        Number of bits to use.  The full sequence has length
        ``(2**nbits) - 1``; large values (beyond ``nbits == 16``) can be
        slow to generate.
    state : array_like, optional
        Initial shift-register state of length ``nbits``, cast to binary
        (bool).  ``None`` uses an all-ones seed for repeatability; an
        all-zero state is invalid and raises an error.  Default: None.
    length : int, optional
        Number of samples to compute.  ``None`` computes the entire
        ``(2**nbits) - 1`` sequence.
    taps : array_like, optional
        Polynomial taps (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
        ``None`` selects taps automatically for up to ``nbits == 32``.

    Returns
    -------
    seq : array
        Resulting MLS sequence of 0's and 1's.
    state : array
        The final state of the shift register.

    Notes
    -----
    See https://en.wikipedia.org/wiki/Maximum_length_sequence for the
    general algorithm.

    .. versionadded:: 0.15.0

    Examples
    --------
    >>> from scipy.signal import max_len_seq
    >>> max_len_seq(4)[0]
    array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
    """
    # Match the platform pointer size so the Cython/Pythran kernel accepts
    # the taps array.
    taps_dtype = np.int32 if np.intp().itemsize == 4 else np.int64
    if taps is None:
        if nbits not in _mls_taps:
            known_taps = np.array(list(_mls_taps.keys()))
            raise ValueError(f'nbits must be between {known_taps.min()} and '
                             f'{known_taps.max()} if taps is None')
        taps = np.array(_mls_taps[nbits], taps_dtype)
    else:
        # Sort descending and drop duplicates before validating.
        taps = np.unique(np.array(taps, taps_dtype))[::-1]
        if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
            raise ValueError('taps must be non-empty with values between '
                             'zero and nbits (inclusive)')
        taps = np.array(taps)  # needed for Cython and Pythran
    n_max = (2**nbits) - 1
    if length is None:
        length = n_max
    else:
        length = int(length)
        if length < 0:
            raise ValueError('length must be greater than or equal to 0')
    # int8 instead of bool: bool arrays don't interact well with Cython.
    if state is None:
        state = np.ones(nbits, dtype=np.int8, order='c')
    else:
        # Copies if needed while forcing the values to 0/1.
        state = np.array(state, dtype=bool, order='c').astype(np.int8)
    if state.ndim != 1 or state.size != nbits:
        raise ValueError('state must be a 1-D array of size nbits')
    if np.all(state == 0):
        raise ValueError('state must not be all zeros')

    seq = np.empty(length, dtype=np.int8, order='c')
    state = _max_len_seq_inner(taps, state, nbits, length, seq)
    return seq, state
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (77.5 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_peak_finding.py ADDED
@@ -0,0 +1,1310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions for identifying peaks in signals.
3
+ """
4
+ import math
5
+ import numpy as np
6
+
7
+ from scipy.signal._wavelets import _cwt, _ricker
8
+ from scipy.stats import scoreatpercentile
9
+
10
+ from ._peak_finding_utils import (
11
+ _local_maxima_1d,
12
+ _select_by_peak_distance,
13
+ _peak_prominences,
14
+ _peak_widths
15
+ )
16
+
17
+
18
+ __all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences',
19
+ 'peak_widths', 'find_peaks', 'find_peaks_cwt']
20
+
21
+
22
+ def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
23
+ """
24
+ Calculate the relative extrema of `data`.
25
+
26
+ Relative extrema are calculated by finding locations where
27
+ ``comparator(data[n], data[n+1:n+order+1])`` is True.
28
+
29
+ Parameters
30
+ ----------
31
+ data : ndarray
32
+ Array in which to find the relative extrema.
33
+ comparator : callable
34
+ Function to use to compare two data points.
35
+ Should take two arrays as arguments.
36
+ axis : int, optional
37
+ Axis over which to select from `data`. Default is 0.
38
+ order : int, optional
39
+ How many points on each side to use for the comparison
40
+ to consider ``comparator(n,n+x)`` to be True.
41
+ mode : str, optional
42
+ How the edges of the vector are treated. 'wrap' (wrap around) or
43
+ 'clip' (treat overflow as the same as the last (or first) element).
44
+ Default 'clip'. See numpy.take.
45
+
46
+ Returns
47
+ -------
48
+ extrema : ndarray
49
+ Boolean array of the same shape as `data` that is True at an extrema,
50
+ False otherwise.
51
+
52
+ See also
53
+ --------
54
+ argrelmax, argrelmin
55
+
56
+ Examples
57
+ --------
58
+ >>> import numpy as np
59
+ >>> from scipy.signal._peak_finding import _boolrelextrema
60
+ >>> testdata = np.array([1,2,3,2,1])
61
+ >>> _boolrelextrema(testdata, np.greater, axis=0)
62
+ array([False, False, True, False, False], dtype=bool)
63
+
64
+ """
65
+ if (int(order) != order) or (order < 1):
66
+ raise ValueError('Order must be an int >= 1')
67
+
68
+ datalen = data.shape[axis]
69
+ locs = np.arange(0, datalen)
70
+
71
+ results = np.ones(data.shape, dtype=bool)
72
+ main = data.take(locs, axis=axis, mode=mode)
73
+ for shift in range(1, order + 1):
74
+ plus = data.take(locs + shift, axis=axis, mode=mode)
75
+ minus = data.take(locs - shift, axis=axis, mode=mode)
76
+ results &= comparator(main, plus)
77
+ results &= comparator(main, minus)
78
+ if ~results.any():
79
+ return results
80
+ return results
81
+
82
+
83
def argrelmin(data, axis=0, order=1, mode='clip'):
    """
    Calculate the relative minima of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative minima.
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        Number of points on each side that must be strictly greater.
    mode : str, optional
        Edge handling, 'wrap' or 'clip' (default); see `numpy.take`.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the minima, one integer array per axis of `data`
        (a tuple even for 1-D input).

    See Also
    --------
    argrelextrema, argrelmax, find_peaks

    Notes
    -----
    Uses `argrelextrema` with ``np.less``, so a strict inequality is
    required on both sides; flat minima (wider than one sample) are not
    detected.  For 1-D data, `find_peaks` on negated data detects those.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import argrelmin
    >>> argrelmin(np.array([2, 1, 2, 3, 2, 0, 1, 0]))
    (array([1, 5]),)
    """
    return argrelextrema(data, np.less, axis=axis, order=order, mode=mode)
139
+
140
+
141
def argrelmax(data, axis=0, order=1, mode='clip'):
    """
    Calculate the relative maxima of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative maxima.
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        Number of points on each side that must be strictly smaller.
    mode : str, optional
        Edge handling, 'wrap' or 'clip' (default); see `numpy.take`.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the maxima, one integer array per axis of `data`
        (a tuple even for 1-D input).

    See Also
    --------
    argrelextrema, argrelmin, find_peaks

    Notes
    -----
    Uses `argrelextrema` with ``np.greater``, so a strict inequality is
    required on both sides; flat maxima (wider than one sample) are not
    detected.  For 1-D data, `find_peaks` detects those as well.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import argrelmax
    >>> argrelmax(np.array([2, 1, 2, 3, 2, 0, 1, 0]))
    (array([3, 6]),)
    """
    return argrelextrema(data, np.greater, axis=axis, order=order, mode=mode)
196
+
197
+
198
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
    """
    Calculate the relative extrema of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative extrema.
    comparator : callable
        Comparison function taking two arrays (e.g. ``np.greater``).
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        Number of points on each side required to satisfy `comparator`.
    mode : str, optional
        Edge handling, 'wrap' or 'clip' (default); see `numpy.take`.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the extrema, one integer array per axis of `data`
        (a tuple even for 1-D input).

    See Also
    --------
    argrelmin, argrelmax

    Notes
    -----
    .. versionadded:: 0.11.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import argrelextrema
    >>> argrelextrema(np.array([2, 1, 2, 3, 2, 0, 1, 0]), np.greater)
    (array([3, 6]),)
    """
    mask = _boolrelextrema(data, comparator, axis, order, mode)
    return np.nonzero(mask)
253
+
254
+
255
+ def _arg_x_as_expected(value):
256
+ """Ensure argument `x` is a 1-D C-contiguous array of dtype('float64').
257
+
258
+ Used in `find_peaks`, `peak_prominences` and `peak_widths` to make `x`
259
+ compatible with the signature of the wrapped Cython functions.
260
+
261
+ Returns
262
+ -------
263
+ value : ndarray
264
+ A 1-D C-contiguous array with dtype('float64').
265
+ """
266
+ value = np.asarray(value, order='C', dtype=np.float64)
267
+ if value.ndim != 1:
268
+ raise ValueError('`x` must be a 1-D array')
269
+ return value
270
+
271
+
272
+ def _arg_peaks_as_expected(value):
273
+ """Ensure argument `peaks` is a 1-D C-contiguous array of dtype('intp').
274
+
275
+ Used in `peak_prominences` and `peak_widths` to make `peaks` compatible
276
+ with the signature of the wrapped Cython functions.
277
+
278
+ Returns
279
+ -------
280
+ value : ndarray
281
+ A 1-D C-contiguous array with dtype('intp').
282
+ """
283
+ value = np.asarray(value)
284
+ if value.size == 0:
285
+ # Empty arrays default to np.float64 but are valid input
286
+ value = np.array([], dtype=np.intp)
287
+ try:
288
+ # Safely convert to C-contiguous array of type np.intp
289
+ value = value.astype(np.intp, order='C', casting='safe',
290
+ subok=False, copy=False)
291
+ except TypeError as e:
292
+ raise TypeError("cannot safely cast `peaks` to dtype('intp')") from e
293
+ if value.ndim != 1:
294
+ raise ValueError('`peaks` must be a 1-D array')
295
+ return value
296
+
297
+
298
+ def _arg_wlen_as_expected(value):
299
+ """Ensure argument `wlen` is of type `np.intp` and larger than 1.
300
+
301
+ Used in `peak_prominences` and `peak_widths`.
302
+
303
+ Returns
304
+ -------
305
+ value : np.intp
306
+ The original `value` rounded up to an integer or -1 if `value` was
307
+ None.
308
+ """
309
+ if value is None:
310
+ # _peak_prominences expects an intp; -1 signals that no value was
311
+ # supplied by the user
312
+ value = -1
313
+ elif 1 < value:
314
+ # Round up to a positive integer
315
+ if isinstance(value, float):
316
+ value = math.ceil(value)
317
+ value = np.intp(value)
318
+ else:
319
+ raise ValueError(f'`wlen` must be larger than 1, was {value}')
320
+ return value
321
+
322
+
323
def peak_prominences(x, peaks, wlen=None):
    """
    Calculate the prominence of each peak in a signal.

    The prominence of a peak quantifies how much the peak stands out from
    the surrounding baseline of the signal; it is defined as the vertical
    distance between the peak and its lowest contour line.

    Parameters
    ----------
    x : sequence
        A signal with peaks.
    peaks : sequence
        Indices of peaks in `x`.
    wlen : int, optional
        A window length in samples that optionally limits the evaluated
        area for each peak to a subset of `x`. The peak is always placed
        in the middle of the window, therefore the given length is rounded
        up to the next odd integer. This parameter can speed up the
        calculation (see Notes).

    Returns
    -------
    prominences : ndarray
        The calculated prominences for each peak in `peaks`.
    left_bases, right_bases : ndarray
        The peaks' bases as indices in `x` to the left and right of each
        peak. The higher base of each pair is a peak's lowest contour line.

    Raises
    ------
    ValueError
        If a value in `peaks` is an invalid index for `x`.

    Warns
    -----
    PeakPropertyWarning
        For indices in `peaks` that don't point to valid local maxima in
        `x`, the returned prominence will be 0 and this warning is raised.
        This also happens if `wlen` is smaller than the plateau size of a
        peak.

    Warnings
    --------
    This function may return unexpected results for data containing NaNs.
    To avoid this, NaNs should either be removed or replaced.

    See Also
    --------
    find_peaks
        Find peaks inside a signal based on peak properties.
    peak_widths
        Calculate the width of peaks.

    Notes
    -----
    The prominence is computed by extending a horizontal line from each
    peak to both sides until it leaves the window (see `wlen`) or meets
    the slope of a higher peak, taking the minimum of the signal on each
    side as the peak's bases; the higher base marks the lowest contour
    line and the prominence is the peak height minus that level.

    Searching for the bases can be slow for large `x` with periodic
    behavior. Restricting the evaluated area with `wlen` shortens the
    calculation, but may yield a smaller ("local") prominence if a peak's
    true bases lie outside the window.

    .. versionadded:: 1.1.0

    References
    ----------
    .. [1] Wikipedia Article for Topographic Prominence:
       https://en.wikipedia.org/wiki/Topographic_prominence

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import find_peaks, peak_prominences
    >>> x = np.sin(np.linspace(0, 6 * np.pi, 1000))
    >>> peaks, _ = find_peaks(x)
    >>> peak_prominences(x, peaks)[0]
    array([2., 2., 1.99997915])
    """
    # Normalize the inputs to the exact layouts the Cython layer expects.
    signal = _arg_x_as_expected(x)
    indices = _arg_peaks_as_expected(peaks)
    window = _arg_wlen_as_expected(wlen)
    return _peak_prominences(signal, indices, window)
465
+
466
+
467
def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None):
    """
    Calculate the width of each peak in a signal.

    This function calculates the width of a peak in samples at a relative
    distance to the peak's height and prominence.

    Parameters
    ----------
    x : sequence
        A signal with peaks.
    peaks : sequence
        Indices of peaks in `x`.
    rel_height : float, optional
        Chooses the relative height at which the peak width is measured as
        a percentage of its prominence. 1.0 calculates the width of the
        peak at its lowest contour line while 0.5 evaluates at half the
        prominence height. Must be at least 0.
    prominence_data : tuple, optional
        A tuple of three arrays matching the output of `peak_prominences`
        when called with the same arguments `x` and `peaks`. This data is
        calculated internally if not provided.
    wlen : int, optional
        A window length in samples passed to `peak_prominences` as an
        optional argument for internal calculation of `prominence_data`.
        This argument is ignored if `prominence_data` is given.

    Returns
    -------
    widths : ndarray
        The widths for each peak in samples.
    width_heights : ndarray
        The height of the contour lines at which the `widths` were
        evaluated.
    left_ips, right_ips : ndarray
        Interpolated positions of left and right intersection points of a
        horizontal line at the respective evaluation height.

    Raises
    ------
    ValueError
        If `prominence_data` is supplied but doesn't satisfy the condition
        ``0 <= left_base <= peak <= right_base < x.shape[0]`` for each
        peak, has the wrong dtype, is not C-contiguous or does not have
        the same shape.

    Warns
    -----
    PeakPropertyWarning
        Raised if any calculated width is 0. This may stem from the
        supplied `prominence_data` or if `rel_height` is set to 0.

    Warnings
    --------
    This function may return unexpected results for data containing NaNs.
    To avoid this, NaNs should either be removed or replaced.

    See Also
    --------
    find_peaks
        Find peaks inside a signal based on peak properties.
    peak_prominences
        Calculate the prominence of peaks.

    Notes
    -----
    The evaluation height of each peak is computed as
    :math:`h_{eval} = h_{Peak} - P \\cdot R` where :math:`P` is the peak's
    prominence and :math:`R` is `rel_height`. A horizontal line at that
    height is extended to both sides until it intersects a slope (the true
    intersection is linearly interpolated), reaches the signal border or
    crosses the vertical position of the peak's base; the width is the
    horizontal distance between the chosen endpoints. Consequently the
    maximal possible width of a peak is the distance between its bases.

    .. versionadded:: 1.1.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import find_peaks, peak_widths
    >>> x = np.sin(np.linspace(0, 6 * np.pi, 1000))
    >>> peaks, _ = find_peaks(x)
    >>> peak_widths(x, peaks, rel_height=0.5)[0]
    array([166.25135935, 166.25167173, 166.29495095])
    """
    # Normalize the inputs to the exact layouts the Cython layer expects.
    signal = _arg_x_as_expected(x)
    indices = _arg_peaks_as_expected(peaks)
    if prominence_data is None:
        # Prominence data not supplied: compute it here, honoring `wlen`.
        prominence_data = _peak_prominences(signal, indices,
                                            _arg_wlen_as_expected(wlen))
    return _peak_widths(signal, indices, rel_height, *prominence_data)
594
+
595
+
596
+ def _unpack_condition_args(interval, x, peaks):
597
+ """
598
+ Parse condition arguments for `find_peaks`.
599
+
600
+ Parameters
601
+ ----------
602
+ interval : number or ndarray or sequence
603
+ Either a number or ndarray or a 2-element sequence of the former. The
604
+ first value is always interpreted as `imin` and the second, if supplied,
605
+ as `imax`.
606
+ x : ndarray
607
+ The signal with `peaks`.
608
+ peaks : ndarray
609
+ An array with indices used to reduce `imin` and / or `imax` if those are
610
+ arrays.
611
+
612
+ Returns
613
+ -------
614
+ imin, imax : number or ndarray or None
615
+ Minimal and maximal value in `argument`.
616
+
617
+ Raises
618
+ ------
619
+ ValueError :
620
+ If interval border is given as array and its size does not match the size
621
+ of `x`.
622
+
623
+ Notes
624
+ -----
625
+
626
+ .. versionadded:: 1.1.0
627
+ """
628
+ try:
629
+ imin, imax = interval
630
+ except (TypeError, ValueError):
631
+ imin, imax = (interval, None)
632
+
633
+ # Reduce arrays if arrays
634
+ if isinstance(imin, np.ndarray):
635
+ if imin.size != x.size:
636
+ raise ValueError('array size of lower interval border must match x')
637
+ imin = imin[peaks]
638
+ if isinstance(imax, np.ndarray):
639
+ if imax.size != x.size:
640
+ raise ValueError('array size of upper interval border must match x')
641
+ imax = imax[peaks]
642
+
643
+ return imin, imax
644
+
645
+
646
+ def _select_by_property(peak_properties, pmin, pmax):
647
+ """
648
+ Evaluate where the generic property of peaks confirms to an interval.
649
+
650
+ Parameters
651
+ ----------
652
+ peak_properties : ndarray
653
+ An array with properties for each peak.
654
+ pmin : None or number or ndarray
655
+ Lower interval boundary for `peak_properties`. ``None`` is interpreted as
656
+ an open border.
657
+ pmax : None or number or ndarray
658
+ Upper interval boundary for `peak_properties`. ``None`` is interpreted as
659
+ an open border.
660
+
661
+ Returns
662
+ -------
663
+ keep : bool
664
+ A boolean mask evaluating to true where `peak_properties` confirms to the
665
+ interval.
666
+
667
+ See Also
668
+ --------
669
+ find_peaks
670
+
671
+ Notes
672
+ -----
673
+
674
+ .. versionadded:: 1.1.0
675
+ """
676
+ keep = np.ones(peak_properties.size, dtype=bool)
677
+ if pmin is not None:
678
+ keep &= (pmin <= peak_properties)
679
+ if pmax is not None:
680
+ keep &= (peak_properties <= pmax)
681
+ return keep
682
+
683
+
684
+ def _select_by_peak_threshold(x, peaks, tmin, tmax):
685
+ """
686
+ Evaluate which peaks fulfill the threshold condition.
687
+
688
+ Parameters
689
+ ----------
690
+ x : ndarray
691
+ A 1-D array which is indexable by `peaks`.
692
+ peaks : ndarray
693
+ Indices of peaks in `x`.
694
+ tmin, tmax : scalar or ndarray or None
695
+ Minimal and / or maximal required thresholds. If supplied as ndarrays
696
+ their size must match `peaks`. ``None`` is interpreted as an open
697
+ border.
698
+
699
+ Returns
700
+ -------
701
+ keep : bool
702
+ A boolean mask evaluating to true where `peaks` fulfill the threshold
703
+ condition.
704
+ left_thresholds, right_thresholds : ndarray
705
+ Array matching `peak` containing the thresholds of each peak on
706
+ both sides.
707
+
708
+ Notes
709
+ -----
710
+
711
+ .. versionadded:: 1.1.0
712
+ """
713
+ # Stack thresholds on both sides to make min / max operations easier:
714
+ # tmin is compared with the smaller, and tmax with the greater threshold to
715
+ # each peak's side
716
+ stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1],
717
+ x[peaks] - x[peaks + 1]])
718
+ keep = np.ones(peaks.size, dtype=bool)
719
+ if tmin is not None:
720
+ min_thresholds = np.min(stacked_thresholds, axis=0)
721
+ keep &= (tmin <= min_thresholds)
722
+ if tmax is not None:
723
+ max_thresholds = np.max(stacked_thresholds, axis=0)
724
+ keep &= (max_thresholds <= tmax)
725
+
726
+ return keep, stacked_thresholds[0], stacked_thresholds[1]
727
+
728
+
729
def find_peaks(x, height=None, threshold=None, distance=None,
               prominence=None, width=None, wlen=None, rel_height=0.5,
               plateau_size=None):
    """
    Find peaks inside a signal based on peak properties.

    This function takes a 1-D array and finds all local maxima by
    simple comparison of neighboring values. Optionally, a subset of these
    peaks can be selected by specifying conditions for a peak's properties.

    Parameters
    ----------
    x : sequence
        A signal with peaks.
    height : number or ndarray or sequence, optional
        Required height of peaks. Either a number, ``None``, an array
        matching `x` or a 2-element sequence of the former. The first
        element is always interpreted as the minimal and the second, if
        supplied, as the maximal required height.
    threshold : number or ndarray or sequence, optional
        Required threshold of peaks, the vertical distance to its
        neighboring samples. Same format as `height`.
    distance : number, optional
        Required minimal horizontal distance (>= 1) in samples between
        neighbouring peaks. Smaller peaks are removed first until the
        condition is fulfilled for all remaining peaks.
    prominence : number or ndarray or sequence, optional
        Required prominence of peaks. Same format as `height`.
    width : number or ndarray or sequence, optional
        Required width of peaks in samples. Same format as `height`.
    wlen : int, optional
        Used for calculation of the peaks prominences, thus it is only
        used if one of the arguments `prominence` or `width` is given.
        See argument `wlen` in `peak_prominences` for a full description.
    rel_height : float, optional
        Used for calculation of the peaks width, thus it is only used if
        `width` is given. See argument `rel_height` in `peak_widths` for a
        full description.
    plateau_size : number or ndarray or sequence, optional
        Required size of the flat top of peaks in samples. Same format as
        `height`.

        .. versionadded:: 1.2.0

    Returns
    -------
    peaks : ndarray
        Indices of peaks in `x` that satisfy all given conditions.
    properties : dict
        A dictionary containing properties of the returned peaks which
        were calculated as intermediate results during evaluation of the
        specified conditions: 'peak_heights' (if `height` is given),
        'left_thresholds' and 'right_thresholds' (if `threshold` is
        given), 'prominences', 'left_bases' and 'right_bases' (if
        `prominence` or `width` is given; see `peak_prominences`),
        'widths', 'width_heights', 'left_ips' and 'right_ips' (if `width`
        is given; see `peak_widths`), and 'plateau_sizes', 'left_edges'
        and 'right_edges' (if `plateau_size` is given). To calculate and
        return properties without excluding peaks, provide the open
        interval ``(None, None)`` as a value to the appropriate argument
        (excluding `distance`).

    Warns
    -----
    PeakPropertyWarning
        Raised if a peak's properties have unexpected values (see
        `peak_prominences` and `peak_widths`).

    Warnings
    --------
    This function may return unexpected results for data containing NaNs.
    To avoid this, NaNs should either be removed or replaced.

    See Also
    --------
    find_peaks_cwt
        Find peaks using the wavelet transformation.
    peak_prominences
        Directly calculate the prominence of peaks.
    peak_widths
        Directly calculate the width of peaks.

    Notes
    -----
    In the context of this function, a peak or local maximum is defined as
    any sample whose two direct neighbours have a smaller amplitude. For
    flat peaks (more than one sample of equal amplitude wide) the index of
    the middle sample is returned (rounded down in case the number of
    samples is even). For noisy signals consider smoothing the signal
    before searching for peaks or use other peak finding and fitting
    methods (like `find_peaks_cwt`).

    Almost all conditions (excluding `distance`) can be given as half-open
    or closed intervals, with borders included, and the borders can be
    arrays matching `x` in shape for position-dependent constraints. The
    conditions are evaluated in the order `plateau_size`, `height`,
    `threshold`, `distance`, `prominence`, `width`; faster operations are
    applied first to reduce the number of peaks evaluated later. While
    indices in `peaks` are guaranteed to be at least `distance` samples
    apart, edges of flat peaks may be closer than the allowed `distance`.

    .. versionadded:: 1.1.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import find_peaks
    >>> x = np.array([0, 1, 0, 2, 0, 3, 0])
    >>> peaks, _ = find_peaks(x, height=1.5)
    >>> peaks
    array([3, 5])
    """
    # _local_maxima_1d expects an array of dtype 'float64'
    x = _arg_x_as_expected(x)
    if distance is not None and distance < 1:
        raise ValueError('`distance` must be greater or equal to 1')

    # All local maxima, plus the edges of their plateaus (edge indices are
    # still part of the plateau).
    peaks, left_edges, right_edges = _local_maxima_1d(x)
    properties = {}

    # Each condition below filters `peaks` with a boolean mask `keep` and
    # applies the same mask to every property gathered so far, so `peaks`
    # and all property arrays stay aligned throughout.
    if plateau_size is not None:
        # Evaluate plateau size
        plateau_sizes = right_edges - left_edges + 1
        pmin, pmax = _unpack_condition_args(plateau_size, x, peaks)
        keep = _select_by_property(plateau_sizes, pmin, pmax)
        peaks = peaks[keep]
        properties["plateau_sizes"] = plateau_sizes
        properties["left_edges"] = left_edges
        properties["right_edges"] = right_edges
        properties = {key: array[keep] for key, array in properties.items()}

    if height is not None:
        # Evaluate height condition
        peak_heights = x[peaks]
        hmin, hmax = _unpack_condition_args(height, x, peaks)
        keep = _select_by_property(peak_heights, hmin, hmax)
        peaks = peaks[keep]
        properties["peak_heights"] = peak_heights
        properties = {key: array[keep] for key, array in properties.items()}

    if threshold is not None:
        # Evaluate threshold condition
        tmin, tmax = _unpack_condition_args(threshold, x, peaks)
        keep, left_thresholds, right_thresholds = _select_by_peak_threshold(
            x, peaks, tmin, tmax)
        peaks = peaks[keep]
        properties["left_thresholds"] = left_thresholds
        properties["right_thresholds"] = right_thresholds
        properties = {key: array[keep] for key, array in properties.items()}

    if distance is not None:
        # Evaluate distance condition
        keep = _select_by_peak_distance(peaks, x[peaks], distance)
        peaks = peaks[keep]
        properties = {key: array[keep] for key, array in properties.items()}

    if prominence is not None or width is not None:
        # Calculate prominence (required for both conditions)
        wlen = _arg_wlen_as_expected(wlen)
        properties.update(zip(
            ['prominences', 'left_bases', 'right_bases'],
            _peak_prominences(x, peaks, wlen=wlen)
        ))

    if prominence is not None:
        # Evaluate prominence condition
        pmin, pmax = _unpack_condition_args(prominence, x, peaks)
        keep = _select_by_property(properties['prominences'], pmin, pmax)
        peaks = peaks[keep]
        properties = {key: array[keep] for key, array in properties.items()}

    if width is not None:
        # Calculate widths
        properties.update(zip(
            ['widths', 'width_heights', 'left_ips', 'right_ips'],
            _peak_widths(x, peaks, rel_height, properties['prominences'],
                         properties['left_bases'], properties['right_bases'])
        ))
        # Evaluate width condition
        wmin, wmax = _unpack_condition_args(width, x, peaks)
        keep = _select_by_property(properties['widths'], wmin, wmax)
        peaks = peaks[keep]
        properties = {key: array[keep] for key, array in properties.items()}

    return peaks, properties
1011
+
1012
+
1013
def _identify_ridge_lines(matr, max_distances, gap_thresh):
    """
    Identify ridges in the 2-D matrix.

    Expect that the width of the wavelet feature increases with increasing
    row number.

    Parameters
    ----------
    matr : 2-D ndarray
        Matrix in which to identify ridge lines.
    max_distances : 1-D sequence
        At each row, a ridge line is only connected
        if the relative max at row[n] is within
        `max_distances`[n] from the relative max at row[n+1].
    gap_thresh : int
        If a relative maximum is not found within `max_distances`,
        there will be a gap. A ridge line is discontinued if
        there are more than `gap_thresh` points without connecting
        a new relative maximum.

    Returns
    -------
    ridge_lines : list
        List of 2-element lists. ``ridge_lines[ii][0]`` are the rows of
        the ii-th ridge-line, ``ridge_lines[ii][1]`` are the columns.
        Empty list if none found. Each ridge-line will be sorted by row
        (increasing), but the order of the ridge lines is not specified.

    References
    ----------
    .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
        :doi:`10.1093/bioinformatics/btl355`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal._peak_finding import _identify_ridge_lines
    >>> rng = np.random.default_rng()
    >>> data = rng.random((5,5))
    >>> max_dist = 3
    >>> max_distances = np.full(20, max_dist)
    >>> ridge_lines = _identify_ridge_lines(data, max_distances, 1)

    Notes
    -----
    This function is intended to be used in conjunction with `cwt`
    as part of `find_peaks_cwt`.

    """
    if len(max_distances) < matr.shape[0]:
        raise ValueError('Max_distances must have at least as many rows '
                         'as matr')

    # Boolean matrix marking the relative maxima of each row.
    all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
    # Highest row for which there are any relative maxima
    has_relmax = np.nonzero(all_max_cols.any(axis=1))[0]
    if len(has_relmax) == 0:
        return []
    start_row = has_relmax[-1]
    # Each ridge line is a 3-element list: [rows, cols, gap_number].
    # Tracking starts at the highest row with maxima and walks towards
    # row 0, connecting maxima across rows.
    ridge_lines = [[[start_row],
                    [col],
                    0] for col in np.nonzero(all_max_cols[start_row])[0]]
    final_lines = []
    rows = np.arange(start_row - 1, -1, -1)
    cols = np.arange(0, matr.shape[1])
    for row in rows:
        this_max_cols = cols[all_max_cols[row]]

        # Increment gap number of each line,
        # set it to zero later if appropriate
        for line in ridge_lines:
            line[2] += 1

        # XXX These should always be all_max_cols[row]
        # But the order might be different. Might be an efficiency gain
        # to make sure the order is the same and avoid this iteration
        prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
        # Look through every relative maximum found at current row
        # Attempt to connect them with existing ridge lines.
        for ind, col in enumerate(this_max_cols):
            # If there is a previous ridge line within
            # the max_distance to connect to, do so.
            # Otherwise start a new one.
            line = None
            if len(prev_ridge_cols) > 0:
                diffs = np.abs(col - prev_ridge_cols)
                closest = np.argmin(diffs)
                if diffs[closest] <= max_distances[row]:
                    line = ridge_lines[closest]
            if line is not None:
                # Found a point close enough, extend current ridge line
                line[1].append(col)
                line[0].append(row)
                line[2] = 0
            else:
                new_line = [[row],
                            [col],
                            0]
                ridge_lines.append(new_line)

        # Move ridge lines whose gap number exceeded gap_thresh from the
        # active set into final_lines; they are still reported below.
        # XXX Modifying a list while iterating over it.
        # Should be safe, since we iterate backwards, but
        # still tacky.
        for ind in range(len(ridge_lines) - 1, -1, -1):
            line = ridge_lines[ind]
            if line[2] > gap_thresh:
                final_lines.append(line)
                del ridge_lines[ind]

    # Sort each line's points by row (rows were appended in decreasing
    # order during tracking) and drop the gap counter from the output.
    out_lines = []
    for line in (final_lines + ridge_lines):
        sortargs = np.array(np.argsort(line[0]))
        rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
        rows[sortargs] = line[0]
        cols[sortargs] = line[1]
        out_lines.append([rows, cols])

    return out_lines
1135
+
1136
+
1137
+ def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
1138
+ min_snr=1, noise_perc=10):
1139
+ """
1140
+ Filter ridge lines according to prescribed criteria. Intended
1141
+ to be used for finding relative maxima.
1142
+
1143
+ Parameters
1144
+ ----------
1145
+ cwt : 2-D ndarray
1146
+ Continuous wavelet transform from which the `ridge_lines` were defined.
1147
+ ridge_lines : 1-D sequence
1148
+ Each element should contain 2 sequences, the rows and columns
1149
+ of the ridge line (respectively).
1150
+ window_size : int, optional
1151
+ Size of window to use to calculate noise floor.
1152
+ Default is ``cwt.shape[1] / 20``.
1153
+ min_length : int, optional
1154
+ Minimum length a ridge line needs to be acceptable.
1155
+ Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
1156
+ min_snr : float, optional
1157
+ Minimum SNR ratio. Default 1. The signal is the value of
1158
+ the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
1159
+ noise is the `noise_perc`\\ th percentile of datapoints contained within a
1160
+ window of `window_size` around ``cwt[0, loc]``.
1161
+ noise_perc : float, optional
1162
+ When calculating the noise floor, percentile of data points
1163
+ examined below which to consider noise. Calculated using
1164
+ scipy.stats.scoreatpercentile.
1165
+
1166
+ References
1167
+ ----------
1168
+ .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
1169
+ :doi:`10.1093/bioinformatics/btl355`
1170
+
1171
+ """
1172
+ num_points = cwt.shape[1]
1173
+ if min_length is None:
1174
+ min_length = np.ceil(cwt.shape[0] / 4)
1175
+ if window_size is None:
1176
+ window_size = np.ceil(num_points / 20)
1177
+
1178
+ window_size = int(window_size)
1179
+ hf_window, odd = divmod(window_size, 2)
1180
+
1181
+ # Filter based on SNR
1182
+ row_one = cwt[0, :]
1183
+ noises = np.empty_like(row_one)
1184
+ for ind, val in enumerate(row_one):
1185
+ window_start = max(ind - hf_window, 0)
1186
+ window_end = min(ind + hf_window + odd, num_points)
1187
+ noises[ind] = scoreatpercentile(row_one[window_start:window_end],
1188
+ per=noise_perc)
1189
+
1190
+ def filt_func(line):
1191
+ if len(line[0]) < min_length:
1192
+ return False
1193
+ snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
1194
+ if snr < min_snr:
1195
+ return False
1196
+ return True
1197
+
1198
+ return list(filter(filt_func, ridge_lines))
1199
+
1200
+
1201
+ def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
1202
+ gap_thresh=None, min_length=None,
1203
+ min_snr=1, noise_perc=10, window_size=None):
1204
+ """
1205
+ Find peaks in a 1-D array with wavelet transformation.
1206
+
1207
+ The general approach is to smooth `vector` by convolving it with
1208
+ `wavelet(width)` for each width in `widths`. Relative maxima which
1209
+ appear at enough length scales, and with sufficiently high SNR, are
1210
+ accepted.
1211
+
1212
+ Parameters
1213
+ ----------
1214
+ vector : ndarray
1215
+ 1-D array in which to find the peaks.
1216
+ widths : float or sequence
1217
+ Single width or 1-D array-like of widths to use for calculating
1218
+ the CWT matrix. In general,
1219
+ this range should cover the expected width of peaks of interest.
1220
+ wavelet : callable, optional
1221
+ Should take two parameters and return a 1-D array to convolve
1222
+ with `vector`. The first parameter determines the number of points
1223
+ of the returned wavelet array, the second parameter is the scale
1224
+ (`width`) of the wavelet. Should be normalized and symmetric.
1225
+ Default is the ricker wavelet.
1226
+ max_distances : ndarray, optional
1227
+ At each row, a ridge line is only connected if the relative max at
1228
+ row[n] is within ``max_distances[n]`` from the relative max at
1229
+ ``row[n+1]``. Default value is ``widths/4``.
1230
+ gap_thresh : float, optional
1231
+ If a relative maximum is not found within `max_distances`,
1232
+ there will be a gap. A ridge line is discontinued if there are more
1233
+ than `gap_thresh` points without connecting a new relative maximum.
1234
+ Default is the first value of the widths array i.e. widths[0].
1235
+ min_length : int, optional
1236
+ Minimum length a ridge line needs to be acceptable.
1237
+ Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
1238
+ min_snr : float, optional
1239
+ Minimum SNR ratio. Default 1. The signal is the maximum CWT coefficient
1240
+ on the largest ridge line. The noise is `noise_perc` th percentile of
1241
+ datapoints contained within the same ridge line.
1242
+ noise_perc : float, optional
1243
+ When calculating the noise floor, percentile of data points
1244
+ examined below which to consider noise. Calculated using
1245
+ `stats.scoreatpercentile`. Default is 10.
1246
+ window_size : int, optional
1247
+ Size of window to use to calculate noise floor.
1248
+ Default is ``cwt.shape[1] / 20``.
1249
+
1250
+ Returns
1251
+ -------
1252
+ peaks_indices : ndarray
1253
+ Indices of the locations in the `vector` where peaks were found.
1254
+ The list is sorted.
1255
+
1256
+ See Also
1257
+ --------
1258
+ find_peaks
1259
+ Find peaks inside a signal based on peak properties.
1260
+
1261
+ Notes
1262
+ -----
1263
+ This approach was designed for finding sharp peaks among noisy data,
1264
+ however with proper parameter selection it should function well for
1265
+ different peak shapes.
1266
+
1267
+ The algorithm is as follows:
1268
+ 1. Perform a continuous wavelet transform on `vector`, for the supplied
1269
+ `widths`. This is a convolution of `vector` with `wavelet(width)` for
1270
+ each width in `widths`. See `cwt`.
1271
+ 2. Identify "ridge lines" in the cwt matrix. These are relative maxima
1272
+ at each row, connected across adjacent rows. See identify_ridge_lines
1273
+ 3. Filter the ridge_lines using filter_ridge_lines.
1274
+
1275
+ .. versionadded:: 0.11.0
1276
+
1277
+ References
1278
+ ----------
1279
+ .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
1280
+ :doi:`10.1093/bioinformatics/btl355`
1281
+
1282
+ Examples
1283
+ --------
1284
+ >>> import numpy as np
1285
+ >>> from scipy import signal
1286
+ >>> xs = np.arange(0, np.pi, 0.05)
1287
+ >>> data = np.sin(xs)
1288
+ >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
1289
+ >>> peakind, xs[peakind], data[peakind]
1290
+ ([32], array([ 1.6]), array([ 0.9995736]))
1291
+
1292
+ """
1293
+ widths = np.atleast_1d(np.asarray(widths))
1294
+
1295
+ if gap_thresh is None:
1296
+ gap_thresh = np.ceil(widths[0])
1297
+ if max_distances is None:
1298
+ max_distances = widths / 4.0
1299
+ if wavelet is None:
1300
+ wavelet = _ricker
1301
+
1302
+ cwt_dat = _cwt(vector, wavelet, widths)
1303
+ ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
1304
+ filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
1305
+ window_size=window_size, min_snr=min_snr,
1306
+ noise_perc=noise_perc)
1307
+ max_locs = np.asarray([x[1][0] for x in filtered])
1308
+ max_locs.sort()
1309
+
1310
+ return max_locs
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.linalg import lstsq
3
+ from scipy._lib._util import float_factorial
4
+ from scipy.ndimage import convolve1d # type: ignore[attr-defined]
5
+ from ._arraytools import axis_slice
6
+
7
+
8
def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None,
                  use="conv"):
    """Compute the coefficients for a 1-D Savitzky-Golay FIR filter.

    The coefficients are obtained by least-squares fitting a polynomial of
    degree `polyorder` to `window_length` equally spaced samples and
    evaluating the `deriv`-th derivative of that polynomial at `pos`.

    Parameters
    ----------
    window_length : int
        The length of the filter window (i.e., the number of coefficients).
    polyorder : int
        The order of the polynomial used to fit the samples.
        `polyorder` must be less than `window_length`.
    deriv : int, optional
        The order of the derivative to compute; must be a nonnegative
        integer. The default 0 means filtering without differentiating.
    delta : float, optional
        The spacing of the samples to which the filter will be applied.
        Only used if deriv > 0.
    pos : int or None, optional
        Evaluation position within the window. The default (``None``)
        selects the middle of the window (a half-integer when
        `window_length` is even).
    use : str, optional
        Either 'conv' or 'dot'. 'conv' (the default) orders the
        coefficients for use in a convolution; 'dot' reverses them so the
        filter is applied by dotting the coefficients with the data.

    Returns
    -------
    coeffs : 1-D ndarray
        The filter coefficients.

    See Also
    --------
    savgol_filter

    Notes
    -----
    .. versionadded:: 0.14.0

    References
    ----------
    A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by
    Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8),
    pp 1627-1639.
    Jianwen Luo, Kui Ying, and Jing Bai. 2005. Savitzky-Golay smoothing and
    differentiation filter for even number data. Signal Process.
    85, 7 (July 2005), 1429-1434.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import savgol_coeffs
    >>> savgol_coeffs(5, 2)
    array([-0.08571429,  0.34285714,  0.48571429,  0.34285714, -0.08571429])

    `x` below holds samples of the parabola x = t**2 at t = -1, 0, 1, 2, 3;
    `c` computes the derivative at the last position, which should be 6.

    >>> x = np.array([1, 0, 1, 4, 9])
    >>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot')
    >>> c.dot(x)
    6.0
    """
    # To recreate the table of sample coefficients shown in the chapter on
    # the Savitzky-Golay filter in Numerical Recipes, use
    #   window_length = nL + nR + 1, pos = nL + 1,
    #   c = savgol_coeffs(window_length, M, pos=pos, use='dot').

    # Validate arguments before doing any numerical work.
    if polyorder >= window_length:
        raise ValueError("polyorder must be less than window_length.")

    halflen, rem = divmod(window_length, 2)
    if pos is None:
        # Center of the window; a half-integer when the window is even.
        pos = halflen - 0.5 if rem == 0 else halflen

    if not (0 <= pos < window_length):
        raise ValueError("pos must be nonnegative and less than "
                         "window_length.")

    if use not in ['conv', 'dot']:
        raise ValueError("`use` must be 'conv' or 'dot'")

    if deriv > polyorder:
        # Differentiating beyond the polynomial degree annihilates it.
        return np.zeros(window_length)

    # Design matrix: rows are powers 0..polyorder of the sample offsets
    # relative to the evaluation position (a non-square Vandermonde
    # matrix).
    offsets = np.arange(-pos, window_length - pos, dtype=float)
    if use == "conv":
        # Reverse so the result can be used directly in a convolution.
        offsets = offsets[::-1]

    powers = np.arange(polyorder + 1).reshape(-1, 1)
    design = offsets ** powers

    # Right-hand side selects the requested derivative; the scale factor
    # accounts for the derivative order and the sample spacing.
    rhs = np.zeros(polyorder + 1)
    rhs[deriv] = float_factorial(deriv) / (delta ** deriv)

    # Least-squares solution of design @ coeffs = rhs.
    coeffs, _, _, _ = lstsq(design, rhs)

    return coeffs
145
+
146
+
147
+ def _polyder(p, m):
148
+ """Differentiate polynomials represented with coefficients.
149
+
150
+ p must be a 1-D or 2-D array. In the 2-D case, each column gives
151
+ the coefficients of a polynomial; the first row holds the coefficients
152
+ associated with the highest power. m must be a nonnegative integer.
153
+ (numpy.polyder doesn't handle the 2-D case.)
154
+ """
155
+
156
+ if m == 0:
157
+ result = p
158
+ else:
159
+ n = len(p)
160
+ if n <= m:
161
+ result = np.zeros_like(p[:1, ...])
162
+ else:
163
+ dp = p[:-m].copy()
164
+ for k in range(m):
165
+ rng = np.arange(n - k - 1, m - k - 1, -1)
166
+ dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1))
167
+ result = dp
168
+ return result
169
+
170
+
171
+ def _fit_edge(x, window_start, window_stop, interp_start, interp_stop,
172
+ axis, polyorder, deriv, delta, y):
173
+ """
174
+ Given an N-d array `x` and the specification of a slice of `x` from
175
+ `window_start` to `window_stop` along `axis`, create an interpolating
176
+ polynomial of each 1-D slice, and evaluate that polynomial in the slice
177
+ from `interp_start` to `interp_stop`. Put the result into the
178
+ corresponding slice of `y`.
179
+ """
180
+
181
+ # Get the edge into a (window_length, -1) array.
182
+ x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)
183
+ if axis == 0 or axis == -x.ndim:
184
+ xx_edge = x_edge
185
+ swapped = False
186
+ else:
187
+ xx_edge = x_edge.swapaxes(axis, 0)
188
+ swapped = True
189
+ xx_edge = xx_edge.reshape(xx_edge.shape[0], -1)
190
+
191
+ # Fit the edges. poly_coeffs has shape (polyorder + 1, -1),
192
+ # where '-1' is the same as in xx_edge.
193
+ poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start),
194
+ xx_edge, polyorder)
195
+
196
+ if deriv > 0:
197
+ poly_coeffs = _polyder(poly_coeffs, deriv)
198
+
199
+ # Compute the interpolated values for the edge.
200
+ i = np.arange(interp_start - window_start, interp_stop - window_start)
201
+ values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv)
202
+
203
+ # Now put the values into the appropriate slice of y.
204
+ # First reshape values to match y.
205
+ shp = list(y.shape)
206
+ shp[0], shp[axis] = shp[axis], shp[0]
207
+ values = values.reshape(interp_stop - interp_start, *shp[1:])
208
+ if swapped:
209
+ values = values.swapaxes(0, axis)
210
+ # Get a view of the data to be replaced by values.
211
+ y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)
212
+ y_edge[...] = values
213
+
214
+
215
+ def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):
216
+ """
217
+ Use polynomial interpolation of x at the low and high ends of the axis
218
+ to fill in the halflen values in y.
219
+
220
+ This function just calls _fit_edge twice, once for each end of the axis.
221
+ """
222
+ halflen = window_length // 2
223
+ _fit_edge(x, 0, window_length, 0, halflen, axis,
224
+ polyorder, deriv, delta, y)
225
+ n = x.shape[axis]
226
+ _fit_edge(x, n - window_length, n, n - halflen, n, axis,
227
+ polyorder, deriv, delta, y)
228
+
229
+
230
def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0,
                  axis=-1, mode='interp', cval=0.0):
    """ Apply a Savitzky-Golay filter to an array.

    The filter is 1-D; for input of higher dimension, `axis` selects the
    axis along which it is applied.

    Parameters
    ----------
    x : array_like
        The data to be filtered. Converted to ``numpy.float64`` before
        filtering unless it is already single or double precision floating
        point.
    window_length : int
        The length of the filter window (i.e., the number of coefficients).
        If `mode` is 'interp', `window_length` must be less than or equal
        to the size of `x`.
    polyorder : int
        The order of the polynomial used to fit the samples.
        `polyorder` must be less than `window_length`.
    deriv : int, optional
        The order of the derivative to compute; must be a nonnegative
        integer. The default 0 means filtering without differentiating.
    delta : float, optional
        The spacing of the samples to which the filter will be applied.
        Only used if deriv > 0. Default is 1.0.
    axis : int, optional
        The axis of `x` along which the filter is applied. Default is -1.
    mode : str, optional
        Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. All
        but 'interp' determine how the signal is extended before the
        convolution ('constant' pads with `cval`); see the Notes. With
        'interp' (the default) no extension is used: a degree `polyorder`
        polynomial is fit to the last `window_length` values at each edge,
        and that polynomial supplies the last ``window_length // 2`` output
        values.
    cval : scalar, optional
        Value to fill past the edges of the input if `mode` is 'constant'.
        Default is 0.0.

    Returns
    -------
    y : ndarray, same shape as `x`
        The filtered data.

    See Also
    --------
    savgol_coeffs

    Notes
    -----
    For input [1, 2, 3, 4, 5, 6, 7, 8] and `window_length` 7, the extended
    data for the various `mode` options is (assuming `cval` is 0)::

        mode       |   Ext   |         Input          |   Ext
        -----------+---------+------------------------+---------
        'mirror'   | 4  3  2 | 1  2  3  4  5  6  7  8 | 7  6  5
        'nearest'  | 1  1  1 | 1  2  3  4  5  6  7  8 | 8  8  8
        'constant' | 0  0  0 | 1  2  3  4  5  6  7  8 | 0  0  0
        'wrap'     | 6  7  8 | 1  2  3  4  5  6  7  8 | 1  2  3

    .. versionadded:: 0.14.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import savgol_filter
    >>> np.set_printoptions(precision=2)  # For compact display.
    >>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9])

    Filter with a window length of 5 and a degree 2 polynomial:

    >>> savgol_filter(x, 5, 2)
    array([1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1.  , 4.  , 9.  ])

    The last five values of x sample a parabola, so with mode='interp'
    (the default) and polyorder=2 the last three values are unchanged.
    Compare with, e.g., `mode='nearest'`:

    >>> savgol_filter(x, 5, 2, mode='nearest')
    array([1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1.  , 4.6 , 7.97])

    """
    if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]:
        raise ValueError("mode must be 'mirror', 'constant', 'nearest' "
                         "'wrap' or 'interp'.")

    x = np.asarray(x)
    # Work in single or double precision floating point only.
    if x.dtype != np.float64 and x.dtype != np.float32:
        x = x.astype(np.float64)

    coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv,
                           delta=delta)

    if mode != "interp":
        # Every mode except 'interp' maps directly onto
        # ndimage.convolve1d's extension handling.
        return convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)

    if window_length > x.shape[axis]:
        raise ValueError("If mode is 'interp', window_length must be less "
                         "than or equal to the size of x.")

    # Do not pad. Convolve with zero padding, then overwrite the
    # window_length // 2 elements at each end with values from a
    # polynomial fitted to the outermost window_length samples.
    y = convolve1d(x, coeffs, axis=axis, mode="constant")
    _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)
    return y
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_short_time_fft.py ADDED
@@ -0,0 +1,1710 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Implementation of an FFT-based Short-time Fourier Transform. """
2
+
3
+ # Implementation Notes for this file (as of 2023-07)
4
+ # --------------------------------------------------
5
+ # * MyPy version 1.1.1 does not seem to support decorated property methods
6
+ # properly. Hence, applying ``@property`` to methods decorated with `@cache``
7
+ # (as tried with the ``lower_border_end`` method) causes a mypy error when
8
+ # accessing it as an index (e.g., ``SFT.lower_border_end[0]``).
9
+ # * Since the method `stft` and `istft` have identical names as the legacy
10
+ # functions in the signal module, referencing them as HTML link in the
11
+ # docstrings has to be done by an explicit `~ShortTimeFFT.stft` instead of an
12
+ # ambiguous `stft` (The ``~`` hides the class / module name).
13
+ # * The HTML documentation currently renders each method/property on a separate
14
+ # page without reference to the parent class. Thus, a link to `ShortTimeFFT`
15
+ # was added to the "See Also" section of each method/property. These links
16
+ # can be removed, when SciPy updates ``pydata-sphinx-theme`` to >= 0.13.3
17
+ # (currently 0.9). Consult Issue 18512 and PR 16660 for further details.
18
+ #
19
+
20
+ # Provides typing union operator ``|`` in Python 3.9:
21
+ # Linter does not allow to import ``Generator`` from ``typing`` module:
22
+ from collections.abc import Generator, Callable
23
+ from functools import cache, lru_cache, partial
24
+ from typing import get_args, Literal
25
+
26
+ import numpy as np
27
+
28
+ import scipy.fft as fft_lib
29
+ from scipy.signal import detrend
30
+ from scipy.signal.windows import get_window
31
+
32
+ __all__ = ['ShortTimeFFT']
33
+
34
+
35
+ #: Allowed values for parameter `padding` of method `ShortTimeFFT.stft()`:
36
+ PAD_TYPE = Literal['zeros', 'edge', 'even', 'odd']
37
+
38
+ #: Allowed values for property `ShortTimeFFT.fft_mode`:
39
+ FFT_MODE_TYPE = Literal['twosided', 'centered', 'onesided', 'onesided2X']
40
+
41
+
42
+ def _calc_dual_canonical_window(win: np.ndarray, hop: int) -> np.ndarray:
43
+ """Calculate canonical dual window for 1d window `win` and a time step
44
+ of `hop` samples.
45
+
46
+ A ``ValueError`` is raised, if the inversion fails.
47
+
48
+ This is a separate function not a method, since it is also used in the
49
+ class method ``ShortTimeFFT.from_dual()``.
50
+ """
51
+ if hop > len(win):
52
+ raise ValueError(f"{hop=} is larger than window length of {len(win)}" +
53
+ " => STFT not invertible!")
54
+ if issubclass(win.dtype.type, np.integer):
55
+ raise ValueError("Parameter 'win' cannot be of integer type, but " +
56
+ f"{win.dtype=} => STFT not invertible!")
57
+ # The calculation of `relative_resolution` does not work for ints.
58
+ # Furthermore, `win / DD` casts the integers away, thus an implicit
59
+ # cast is avoided, which can always cause confusion when using 32-Bit
60
+ # floats.
61
+
62
+ w2 = win.real**2 + win.imag**2 # win*win.conj() does not ensure w2 is real
63
+ DD = w2.copy()
64
+ for k_ in range(hop, len(win), hop):
65
+ DD[k_:] += w2[:-k_]
66
+ DD[:-k_] += w2[k_:]
67
+
68
+ # check DD > 0:
69
+ relative_resolution = np.finfo(win.dtype).resolution * max(DD)
70
+ if not np.all(DD >= relative_resolution):
71
+ raise ValueError("Short-time Fourier Transform not invertible!")
72
+
73
+ return win / DD
74
+
75
+
76
+ # noinspection PyShadowingNames
77
+ class ShortTimeFFT:
78
+ r"""Provide a parametrized discrete Short-time Fourier transform (stft)
79
+ and its inverse (istft).
80
+
81
+ .. currentmodule:: scipy.signal.ShortTimeFFT
82
+
83
+ The `~ShortTimeFFT.stft` calculates sequential FFTs by sliding a
84
+ window (`win`) over an input signal by `hop` increments. It can be used to
85
+ quantify the change of the spectrum over time.
86
+
87
+ The `~ShortTimeFFT.stft` is represented by a complex-valued matrix S[q,p]
88
+ where the p-th column represents an FFT with the window centered at the
89
+ time t[p] = p * `delta_t` = p * `hop` * `T` where `T` is the sampling
90
+ interval of the input signal. The q-th row represents the values at the
91
+ frequency f[q] = q * `delta_f` with `delta_f` = 1 / (`mfft` * `T`) being
92
+ the bin width of the FFT.
93
+
94
+ The inverse STFT `~ShortTimeFFT.istft` is calculated by reversing the steps
95
+ of the STFT: Take the IFFT of the p-th slice of S[q,p] and multiply the
96
+ result with the so-called dual window (see `dual_win`). Shift the result by
97
+ p * `delta_t` and add the result to previous shifted results to reconstruct
98
+ the signal. If only the dual window is known and the STFT is invertible,
99
+ `from_dual` can be used to instantiate this class.
100
+
101
+ Due to the convention of time t = 0 being at the first sample of the input
102
+ signal, the STFT values typically have negative time slots. Hence,
103
+ negative indexes like `p_min` or `k_min` do not indicate counting
104
+ backwards from an array's end like in standard Python indexing but being
105
+ left of t = 0.
106
+
107
+ More detailed information can be found in the :ref:`tutorial_stft` section
108
+ of the :ref:`user_guide`.
109
+
110
+ Note that all parameters of the initializer, except `scale_to` (which uses
111
+ `scaling`) have identical named attributes.
112
+
113
+ Parameters
114
+ ----------
115
+ win : np.ndarray
116
+ The window must be a real- or complex-valued 1d array.
117
+ hop : int
118
+ The increment in samples, by which the window is shifted in each step.
119
+ fs : float
120
+ Sampling frequency of input signal and window. Its relation to the
121
+ sampling interval `T` is ``T = 1 / fs``.
122
+ fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X'
123
+ Mode of FFT to be used (default 'onesided').
124
+ See property `fft_mode` for details.
125
+ mfft: int | None
126
+ Length of the FFT used, if a zero padded FFT is desired.
127
+ If ``None`` (default), the length of the window `win` is used.
128
+ dual_win : np.ndarray | None
129
+ The dual window of `win`. If set to ``None``, it is calculated if
130
+ needed.
131
+ scale_to : 'magnitude', 'psd' | None
132
+ If not ``None`` (default) the window function is scaled, so each STFT
133
+ column represents either a 'magnitude' or a power spectral density
134
+ ('psd') spectrum. This parameter sets the property `scaling` to the
135
+ same value. See method `scale_to` for details.
136
+ phase_shift : int | None
137
+ If set, add a linear phase `phase_shift` / `mfft` * `f` to each
138
+ frequency `f`. The default value 0 ensures that there is no phase shift
139
+ on the zeroth slice (in which t=0 is centered). See property
140
+ `phase_shift` for more details.
141
+
142
+ Examples
143
+ --------
144
+ The following example shows the magnitude of the STFT of a sine with
145
+ varying frequency :math:`f_i(t)` (marked by a red dashed line in the plot):
146
+
147
+ >>> import numpy as np
148
+ >>> import matplotlib.pyplot as plt
149
+ >>> from scipy.signal import ShortTimeFFT
150
+ >>> from scipy.signal.windows import gaussian
151
+ ...
152
+ >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal
153
+ >>> t_x = np.arange(N) * T_x # time indexes for signal
154
+ >>> f_i = 1 * np.arctan((t_x - t_x[N // 2]) / 2) + 5 # varying frequency
155
+ >>> x = np.sin(2*np.pi*np.cumsum(f_i)*T_x) # the signal
156
+
157
+ The utilized Gaussian window is 50 samples or 2.5 s long. The parameter
158
+ ``mfft=200`` in `ShortTimeFFT` causes the spectrum to be oversampled
159
+ by a factor of 4:
160
+
161
+ >>> g_std = 8 # standard deviation for Gaussian window in samples
162
+ >>> w = gaussian(50, std=g_std, sym=True) # symmetric Gaussian window
163
+ >>> SFT = ShortTimeFFT(w, hop=10, fs=1/T_x, mfft=200, scale_to='magnitude')
164
+ >>> Sx = SFT.stft(x) # perform the STFT
165
+
166
+ In the plot, the time extent of the signal `x` is marked by vertical dashed
167
+ lines. Note that the SFT produces values outside the time range of `x`. The
168
+ shaded areas on the left and the right indicate border effects caused
169
+ by the window slices in that area not fully being inside the time range of
170
+ `x`:
171
+
172
+ >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit
173
+ >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot
174
+ >>> ax1.set_title(rf"STFT ({SFT.m_num*SFT.T:g}$\,s$ Gaussian window, " +
175
+ ... rf"$\sigma_t={g_std*SFT.T}\,$s)")
176
+ >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " +
177
+ ... rf"$\Delta t = {SFT.delta_t:g}\,$s)",
178
+ ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " +
179
+ ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)",
180
+ ... xlim=(t_lo, t_hi))
181
+ ...
182
+ >>> im1 = ax1.imshow(abs(Sx), origin='lower', aspect='auto',
183
+ ... extent=SFT.extent(N), cmap='viridis')
184
+ >>> ax1.plot(t_x, f_i, 'r--', alpha=.5, label='$f_i(t)$')
185
+ >>> fig1.colorbar(im1, label="Magnitude $|S_x(t, f)|$")
186
+ ...
187
+ >>> # Shade areas where window slices stick out to the side:
188
+ >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T),
189
+ ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]:
190
+ ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.2)
191
+ >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line:
192
+ ... ax1.axvline(t_, color='y', linestyle='--', alpha=0.5)
193
+ >>> ax1.legend()
194
+ >>> fig1.tight_layout()
195
+ >>> plt.show()
196
+
197
+ Reconstructing the signal with the `~ShortTimeFFT.istft` is
198
+ straightforward, but note that the length of `x1` should be specified,
199
+ since the SFT length increases in `hop` steps:
200
+
201
+ >>> SFT.invertible # check if invertible
202
+ True
203
+ >>> x1 = SFT.istft(Sx, k1=N)
204
+ >>> np.allclose(x, x1)
205
+ True
206
+
207
+ It is possible to calculate the SFT of signal parts:
208
+
209
+ >>> p_q = SFT.nearest_k_p(N // 2)
210
+ >>> Sx0 = SFT.stft(x[:p_q])
211
+ >>> Sx1 = SFT.stft(x[p_q:])
212
+
213
+ When assembling sequential STFT parts together, the overlap needs to be
214
+ considered:
215
+
216
+ >>> p0_ub = SFT.upper_border_begin(p_q)[1] - SFT.p_min
217
+ >>> p1_le = SFT.lower_border_end[1] - SFT.p_min
218
+ >>> Sx01 = np.hstack((Sx0[:, :p0_ub],
219
+ ... Sx0[:, p0_ub:] + Sx1[:, :p1_le],
220
+ ... Sx1[:, p1_le:]))
221
+ >>> np.allclose(Sx01, Sx) # Compare with SFT of complete signal
222
+ True
223
+
224
+ It is also possible to calculate the `istft` for signal parts:
225
+
226
+ >>> y_p = SFT.istft(Sx, N//3, N//2)
227
+ >>> np.allclose(y_p, x[N//3:N//2])
228
+ True
229
+
230
+ """
231
+ # immutable attributes (only have getters but no setters):
232
+ _win: np.ndarray # window
233
+ _dual_win: np.ndarray | None = None # canonical dual window
234
+ _hop: int # Step of STFT in number of samples
235
+
236
+ # mutable attributes:
237
+ _fs: float # sampling frequency of input signal and window
238
+ _fft_mode: FFT_MODE_TYPE = 'onesided' # Mode of FFT to use
239
+ _mfft: int # length of FFT used - defaults to len(win)
240
+ _scaling: Literal['magnitude', 'psd'] | None = None # Scaling of _win
241
+ _phase_shift: int | None # amount to shift phase of FFT in samples
242
+
243
+ # attributes for caching calculated values:
244
+ _fac_mag: float | None = None
245
+ _fac_psd: float | None = None
246
+ _lower_border_end: tuple[int, int] | None = None
247
+
248
    def __init__(self, win: np.ndarray, hop: int, fs: float, *,
                 fft_mode: FFT_MODE_TYPE = 'onesided',
                 mfft: int | None = None,
                 dual_win: np.ndarray | None = None,
                 scale_to: Literal['magnitude', 'psd'] | None = None,
                 phase_shift: int | None = 0):
        """Create a parametrized STFT instance.

        The parameters are documented in the class docstring of
        `ShortTimeFFT`. A ``ValueError`` is raised for a non-1d or
        non-finite `win`, a `hop` which is not an integer >= 1, or a
        `dual_win` whose shape differs from `win`'s.
        """
        # Validate window: non-empty 1d array with only finite entries:
        if not (win.ndim == 1 and win.size > 0):
            raise ValueError(f"Parameter win must be 1d, but {win.shape=}!")
        if not all(np.isfinite(win)):
            raise ValueError("Parameter win must have finite entries!")
        if not (hop >= 1 and isinstance(hop, int)):
            raise ValueError(f"Parameter {hop=} is not an integer >= 1!")
        # `fs` goes through its property setter, which checks positivity:
        self._win, self._hop, self.fs = win, hop, fs

        # `mfft` setter enforces mfft >= m_num (the window length):
        self.mfft = len(win) if mfft is None else mfft

        if dual_win is not None:
            if dual_win.shape != win.shape:
                raise ValueError(f"{dual_win.shape=} must equal {win.shape=}!")
            if not all(np.isfinite(dual_win)):
                raise ValueError("Parameter dual_win must be a finite array!")
        self._dual_win = dual_win  # needs to be set before scaling

        if scale_to is not None:  # needs to be set before fft_mode
            self.scale_to(scale_to)

        # Both assignments go through property setters, which validate:
        self.fft_mode, self.phase_shift = fft_mode, phase_shift
275
+
276
    @classmethod
    def from_dual(cls, dual_win: np.ndarray, hop: int, fs: float, *,
                  fft_mode: FFT_MODE_TYPE = 'onesided',
                  mfft: int | None = None,
                  scale_to: Literal['magnitude', 'psd'] | None = None,
                  phase_shift: int | None = 0):
        r"""Instantiate a `ShortTimeFFT` by only providing a dual window.

        If an STFT is invertible, it is possible to calculate the window `win`
        from a given dual window `dual_win`. All other parameters have the
        same meaning as in the initializer of `ShortTimeFFT`.

        As explained in the :ref:`tutorial_stft` section of the
        :ref:`user_guide`, an invertible STFT can be interpreted as series
        expansion of time-shifted and frequency modulated dual windows. E.g.,
        the series coefficient S[q,p] belongs to the term, which shifted
        `dual_win` by p * `delta_t` and multiplied it by
        exp( 2 * j * pi * t * q * `delta_f`).


        Examples
        --------
        The following example discusses decomposing a signal into time- and
        frequency-shifted Gaussians. A Gaussian with standard deviation of
        one made up of 51 samples will be used:

        >>> import numpy as np
        >>> import matplotlib.pyplot as plt
        >>> from scipy.signal import ShortTimeFFT
        >>> from scipy.signal.windows import gaussian
        ...
        >>> T, N = 0.1, 51
        >>> d_win = gaussian(N, std=1/T, sym=True)  # symmetric Gaussian window
        >>> t = T * (np.arange(N) - N//2)
        ...
        >>> fg1, ax1 = plt.subplots()
        >>> ax1.set_title(r"Dual Window: Gaussian with $\sigma_t=1$")
        >>> ax1.set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)",
        ...         xlim=(t[0], t[-1]), ylim=(0, 1.1*max(d_win)))
        >>> ax1.plot(t, d_win, 'C0-')

        The following plot with the overlap of 41, 11 and 2 samples show how
        the `hop` interval affects the shape of the window `win`:

        >>> fig2, axx = plt.subplots(3, 1, sharex='all')
        ...
        >>> axx[0].set_title(r"Windows for hop$\in\{10, 40, 49\}$")
        >>> for c_, h_ in enumerate([10, 40, 49]):
        ...     SFT = ShortTimeFFT.from_dual(d_win, h_, 1/T)
        ...     axx[c_].plot(t + h_ * T, SFT.win, 'k--', alpha=.3, label=None)
        ...     axx[c_].plot(t - h_ * T, SFT.win, 'k:', alpha=.3, label=None)
        ...     axx[c_].plot(t, SFT.win, f'C{c_+1}',
        ...                  label=r"$\Delta t=%0.1f\,$s" % SFT.delta_t)
        ...     axx[c_].set_ylim(0, 1.1*max(SFT.win))
        ...     axx[c_].legend(loc='center')
        >>> axx[-1].set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)",
        ...             xlim=(t[0], t[-1]))
        >>> plt.show()

        Besides the window `win` centered at t = 0 the previous
        (t = -`delta_t`) and following window (t = `delta_t`) are depicted. It
        can be seen that for small `hop` intervals, the window is compact and
        smooth, having a good time-frequency concentration in the STFT. For
        the large `hop` interval of 4.9 s, the window has small values around
        t = 0, which are not covered by the overlap of the adjacent windows,
        which could lead to numeric inaccuracies. Furthermore, the peaky shape
        at the beginning and the end of the window points to a higher
        bandwidth, resulting in a poorer time-frequency resolution of the
        STFT.
        Hence, the choice of the `hop` interval will be a compromise between
        a time-frequency resolution and memory requirements demanded by small
        `hop` sizes.

        See Also
        --------
        from_window: Create instance by wrapping `get_window`.
        ShortTimeFFT: Create instance using standard initializer.
        """
        # Derive the (canonical dual) window belonging to `dual_win`:
        win = _calc_dual_canonical_window(dual_win, hop)
        return cls(win=win, hop=hop, fs=fs, fft_mode=fft_mode, mfft=mfft,
                   dual_win=dual_win, scale_to=scale_to,
                   phase_shift=phase_shift)
357
+
358
    @classmethod
    def from_window(cls, win_param: str | tuple | float,
                    fs: float, nperseg: int, noverlap: int, *,
                    symmetric_win: bool = False,
                    fft_mode: FFT_MODE_TYPE = 'onesided',
                    mfft: int | None = None,
                    scale_to: Literal['magnitude', 'psd'] | None = None,
                    phase_shift: int | None = 0):
        """Instantiate `ShortTimeFFT` by using `get_window`.

        The method `get_window` is used to create a window of length
        `nperseg`. The parameter names `noverlap`, and `nperseg` are used
        here, since they are more in line with other classical STFT
        libraries.

        Parameters
        ----------
        win_param: Union[str, tuple, float],
            Parameters passed to `get_window`. For windows with no parameters,
            it may be a string (e.g., ``'hann'``), for parametrized windows a
            tuple, (e.g., ``('gaussian', 2.)``) or a single float specifying
            the shape parameter of a kaiser window (i.e. ``4.`` and
            ``('kaiser', 4.)`` are equal). See `get_window` for more details.
        fs : float
            Sampling frequency of input signal. Its relation to the
            sampling interval `T` is ``T = 1 / fs``.
        nperseg: int
            Window length in samples, which corresponds to the `m_num`.
        noverlap: int
            Window overlap in samples. It relates to the `hop` increment by
            ``hop = nperseg - noverlap``.
        symmetric_win: bool
            If ``True`` then a symmetric window is generated, else a periodic
            window is generated (default). Though symmetric windows seem for
            most applications to be more sensible, the default of a periodic
            window was chosen to correspond to the default of `get_window`.
        fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X'
            Mode of FFT to be used (default 'onesided').
            See property `fft_mode` for details.
        mfft: int | None
            Length of the FFT used, if a zero padded FFT is desired.
            If ``None`` (default), the length of the window `win` is used.
        scale_to : 'magnitude', 'psd' | None
            If not ``None`` (default) the window function is scaled, so each
            STFT column represents either a 'magnitude' or a power spectral
            density ('psd') spectrum. This parameter sets the property
            `scaling` to the same value. See method `scale_to` for details.
        phase_shift : int | None
            If set, add a linear phase `phase_shift` / `mfft` * `f` to each
            frequency `f`. The default value 0 ensures that there is no phase
            shift on the zeroth slice (in which t=0 is centered). See property
            `phase_shift` for more details.

        Examples
        --------
        The following instances ``SFT0`` and ``SFT1`` are equivalent:

        >>> from scipy.signal import ShortTimeFFT, get_window
        >>> nperseg = 9  # window length
        >>> w = get_window(('gaussian', 2.), nperseg)
        >>> fs = 128  # sampling frequency
        >>> hop = 3  # increment of STFT time slice
        >>> SFT0 = ShortTimeFFT(w, hop, fs=fs)
        >>> SFT1 = ShortTimeFFT.from_window(('gaussian', 2.), fs, nperseg,
        ...                                 noverlap=nperseg-hop)

        See Also
        --------
        scipy.signal.get_window: Return a window of a given length and type.
        from_dual: Create instance using dual window.
        ShortTimeFFT: Create instance using standard initializer.
        """
        # `fftbins=not symmetric_win` makes `get_window` return a periodic
        # window by default, matching `get_window`'s own default:
        win = get_window(win_param, nperseg, fftbins=not symmetric_win)
        return cls(win, hop=nperseg-noverlap, fs=fs, fft_mode=fft_mode,
                   mfft=mfft, scale_to=scale_to, phase_shift=phase_shift)
432
+
433
    @property
    def win(self) -> np.ndarray:
        """Window function as real- or complex-valued 1d array.

        This attribute is read only, since `dual_win` depends on it.

        See Also
        --------
        dual_win: Canonical dual window.
        m_num: Number of samples in window `win`.
        m_num_mid: Center index of window `win`.
        mfft: Length of input for the FFT used - may be larger than `m_num`.
        hop: Time increment in signal samples for sliding window.
        ShortTimeFFT: Class this property belongs to.
        """
        return self._win
450
+
451
+ @property
452
+ def hop(self) -> int:
453
+ """Time increment in signal samples for sliding window.
454
+
455
+ This attribute is read only, since `dual_win` depends on it.
456
+
457
+ See Also
458
+ --------
459
+ delta_t: Time increment of STFT (``hop*T``)
460
+ m_num: Number of samples in window `win`.
461
+ m_num_mid: Center index of window `win`.
462
+ mfft: Length of input for the FFT used - may be larger than `m_num`.
463
+ T: Sampling interval of input signal and of the window.
464
+ win: Window function as real- or complex-valued 1d array.
465
+ ShortTimeFFT: Class this property belongs to.
466
+ """
467
+ return self._hop
468
+
469
+ @property
470
+ def T(self) -> float:
471
+ """Sampling interval of input signal and of the window.
472
+
473
+ A ``ValueError`` is raised if it is set to a non-positive value.
474
+
475
+ See Also
476
+ --------
477
+ delta_t: Time increment of STFT (``hop*T``)
478
+ hop: Time increment in signal samples for sliding window.
479
+ fs: Sampling frequency (being ``1/T``)
480
+ t: Times of STFT for an input signal with `n` samples.
481
+ ShortTimeFFT: Class this property belongs to.
482
+ """
483
+ return 1 / self._fs
484
+
485
+ @T.setter
486
+ def T(self, v: float):
487
+ """Sampling interval of input signal and of the window.
488
+
489
+ A ``ValueError`` is raised if it is set to a non-positive value.
490
+ """
491
+ if not (v > 0):
492
+ raise ValueError(f"Sampling interval T={v} must be positive!")
493
+ self._fs = 1 / v
494
+
495
+ @property
496
+ def fs(self) -> float:
497
+ """Sampling frequency of input signal and of the window.
498
+
499
+ The sampling frequency is the inverse of the sampling interval `T`.
500
+ A ``ValueError`` is raised if it is set to a non-positive value.
501
+
502
+ See Also
503
+ --------
504
+ delta_t: Time increment of STFT (``hop*T``)
505
+ hop: Time increment in signal samples for sliding window.
506
+ T: Sampling interval of input signal and of the window (``1/fs``).
507
+ ShortTimeFFT: Class this property belongs to.
508
+ """
509
+ return self._fs
510
+
511
+ @fs.setter
512
+ def fs(self, v: float):
513
+ """Sampling frequency of input signal and of the window.
514
+
515
+ The sampling frequency is the inverse of the sampling interval `T`.
516
+ A ``ValueError`` is raised if it is set to a non-positive value.
517
+ """
518
+ if not (v > 0):
519
+ raise ValueError(f"Sampling frequency fs={v} must be positive!")
520
+ self._fs = v
521
+
522
    @property
    def fft_mode(self) -> FFT_MODE_TYPE:
        """Mode of utilized FFT ('twosided', 'centered', 'onesided' or
        'onesided2X').

        It can have the following values:

        'twosided':
            Two-sided FFT, where values for the negative frequencies are in
            upper half of the array. Corresponds to :func:`~scipy.fft.fft()`.
        'centered':
            Two-sided FFT with the values being ordered along monotonically
            increasing frequencies. Corresponds to applying
            :func:`~scipy.fft.fftshift()` to :func:`~scipy.fft.fft()`.
        'onesided':
            Calculates only values for non-negative frequency values.
            Corresponds to :func:`~scipy.fft.rfft()`.
        'onesided2X':
            Like `onesided`, but the non-zero frequencies are doubled if
            `scaling` is set to 'magnitude' or multiplied by ``sqrt(2)`` if
            set to 'psd'. If `scaling` is ``None``, setting `fft_mode` to
            `onesided2X` is not allowed.
            If the FFT length `mfft` is even, the last FFT value is not paired,
            and thus it is not scaled.

        Note that `onesided` and `onesided2X` do not work for complex-valued signals or
        complex-valued windows. Furthermore, the frequency values can be obtained by
        reading the `f` property, and the number of samples by accessing the `f_pts`
        property.

        See Also
        --------
        delta_f: Width of the frequency bins of the STFT.
        f: Frequencies values of the STFT.
        f_pts: Number of points along the frequency axis.
        onesided_fft: True if a one-sided FFT is used.
        scaling: Normalization applied to the window function.
        ShortTimeFFT: Class this property belongs to.
        """
        return self._fft_mode
562
+
563
+ @fft_mode.setter
564
+ def fft_mode(self, t: FFT_MODE_TYPE):
565
+ """Set mode of FFT.
566
+
567
+ Allowed values are 'twosided', 'centered', 'onesided', 'onesided2X'.
568
+ See the property `fft_mode` for more details.
569
+ """
570
+ if t not in (fft_mode_types := get_args(FFT_MODE_TYPE)):
571
+ raise ValueError(f"fft_mode='{t}' not in {fft_mode_types}!")
572
+
573
+ if t in {'onesided', 'onesided2X'} and np.iscomplexobj(self.win):
574
+ raise ValueError(f"One-sided spectra, i.e., fft_mode='{t}', " +
575
+ "are not allowed for complex-valued windows!")
576
+
577
+ if t == 'onesided2X' and self.scaling is None:
578
+ raise ValueError(f"For scaling is None, fft_mode='{t}' is invalid!"
579
+ "Do scale_to('psd') or scale_to('magnitude')!")
580
+ self._fft_mode = t
581
+
582
    @property
    def mfft(self) -> int:
        """Length of input for the FFT used - may be larger than window
        length `m_num`.

        If not set, `mfft` defaults to the window length `m_num`. Assigning
        a value smaller than `m_num` raises a ``ValueError``.

        See Also
        --------
        f_pts: Number of points along the frequency axis.
        f: Frequencies values of the STFT.
        m_num: Number of samples in window `win`.
        ShortTimeFFT: Class this property belongs to.
        """
        return self._mfft
597
+
598
+ @mfft.setter
599
+ def mfft(self, n_: int):
600
+ """Setter for the length of FFT utilized.
601
+
602
+ See the property `mfft` for further details.
603
+ """
604
+ if not (n_ >= self.m_num):
605
+ raise ValueError(f"Attribute mfft={n_} needs to be at least the " +
606
+ f"window length m_num={self.m_num}!")
607
+ self._mfft = n_
608
+
609
    @property
    def scaling(self) -> Literal['magnitude', 'psd'] | None:
        """Normalization applied to the window function
        ('magnitude', 'psd' or ``None``).

        If not ``None``, the FFTs can be either interpreted as a magnitude or
        a power spectral density spectrum.

        The window function can be scaled by calling the `scale_to` method,
        or it is set by the initializer parameter ``scale_to``.

        See Also
        --------
        fac_magnitude: Scaling factor for a magnitude spectrum.
        fac_psd: Scaling factor for a power spectral density spectrum.
        fft_mode: Mode of utilized FFT
        scale_to: Scale window to obtain 'magnitude' or 'psd' scaling.
        ShortTimeFFT: Class this property belongs to.
        """
        return self._scaling
629
+
630
    def scale_to(self, scaling: Literal['magnitude', 'psd']):
        """Scale window to obtain 'magnitude' or 'psd' scaling for the STFT.

        The window of a 'magnitude' spectrum has an integral of one, i.e.,
        unit area for non-negative windows. This ensures that the absolute
        values of the spectrum do not change if the length of the window
        changes (given the input signal is stationary).

        To represent the power spectral density ('psd') for varying length
        windows the area of the absolute square of the window needs to be
        unity.

        The `scaling` property shows the current scaling. The properties
        `fac_magnitude` and `fac_psd` show the scaling factors required to
        scale the STFT values to a magnitude or a psd spectrum.

        This method is called, if the initializer parameter `scale_to` is set.

        See Also
        --------
        fac_magnitude: Scaling factor for a magnitude spectrum.
        fac_psd: Scaling factor for a power spectral density spectrum.
        fft_mode: Mode of utilized FFT
        scaling: Normalization applied to the window function.
        ShortTimeFFT: Class this method belongs to.
        """
        if scaling not in (scaling_values := {'magnitude', 'psd'}):
            raise ValueError(f"{scaling=} not in {scaling_values}!")
        if self._scaling == scaling:  # do nothing
            return

        s_fac = self.fac_psd if scaling == 'psd' else self.fac_magnitude
        # Rebind to a new scaled array (does not mutate the original window):
        self._win = self._win * s_fac
        if self._dual_win is not None:  # the dual window scales inversely
            self._dual_win = self._dual_win / s_fac
        self._fac_mag, self._fac_psd = None, None  # reset scaling factors
        self._scaling = scaling
667
+
668
    @property
    def phase_shift(self) -> int | None:
        """If set, add linear phase `phase_shift` / `mfft` * `f` to each FFT
        slice of frequency `f`.

        Shifting (more precisely `rolling`) an `mfft`-point FFT input by
        `phase_shift` samples results in a multiplication of the output by
        ``np.exp(2j*np.pi*q*phase_shift/mfft)`` at the frequency q * `delta_f`.

        The default value 0 ensures that there is no phase shift on the
        zeroth slice (in which t=0 is centered).
        No phase shift (``phase_shift is None``) is equivalent to
        ``phase_shift = -mfft//2``. In this case slices are not shifted
        before calculating the FFT.

        The absolute value of `phase_shift` is limited to be less than `mfft`
        (enforced by the setter).

        See Also
        --------
        delta_f: Width of the frequency bins of the STFT.
        f: Frequencies values of the STFT.
        mfft: Length of input for the FFT used
        ShortTimeFFT: Class this property belongs to.
        """
        return self._phase_shift
693
+
694
+ @phase_shift.setter
695
+ def phase_shift(self, v: int | None):
696
+ """The absolute value of the phase shift needs to be less than mfft
697
+ samples.
698
+
699
+ See the `phase_shift` getter method for more details.
700
+ """
701
+ if v is None:
702
+ self._phase_shift = v
703
+ return
704
+ if not isinstance(v, int):
705
+ raise ValueError(f"phase_shift={v} has the unit samples. Hence " +
706
+ "it needs to be an int or it may be None!")
707
+ if not (-self.mfft < v < self.mfft):
708
+ raise ValueError("-mfft < phase_shift < mfft does not hold " +
709
+ f"for mfft={self.mfft}, phase_shift={v}!")
710
+ self._phase_shift = v
711
+
712
    def _x_slices(self, x: np.ndarray, k_off: int, p0: int, p1: int,
                  padding: PAD_TYPE) -> Generator[np.ndarray, None, None]:
        """Generate signal slices along last axis of `x`.

        This method is only used by `stft_detrend`. The parameters are
        described in `~ShortTimeFFT.stft`. One array of length `m_num` is
        yielded per time slice in ``range(p0, p1)``.
        """
        if padding not in (padding_types := get_args(PAD_TYPE)):
            raise ValueError(f"Parameter {padding=} not in {padding_types}!")
        pad_kws: dict[str, dict] = {  # possible keywords to pass to np.pad:
            'zeros': dict(mode='constant', constant_values=(0, 0)),
            'edge': dict(mode='edge'),
            'even': dict(mode='reflect', reflect_type='even'),
            'odd': dict(mode='reflect', reflect_type='odd'),
        }  # typing of pad_kws is needed to make mypy happy

        n, n1 = x.shape[-1], (p1 - p0) * self.hop
        k0 = p0 * self.hop - self.m_num_mid + k_off  # start sample
        k1 = k0 + n1 + self.m_num  # end sample

        # Clip [k0, k1) to the valid sample range of x; anything outside
        # is supplied by padding below:
        i0, i1 = max(k0, 0), min(k1, n)  # indexes to shorten x
        # dimensions for padding x (only the last axis is padded):
        pad_width = [(0, 0)] * (x.ndim-1) + [(-min(k0, 0), max(k1 - n, 0))]

        x1 = np.pad(x[..., i0:i1], pad_width, **pad_kws[padding])
        # Slide a window-sized view over the padded signal in `hop` steps:
        for k_ in range(0, n1, self.hop):
            yield x1[..., k_:k_ + self.m_num]
739
+
740
+ def stft(self, x: np.ndarray, p0: int | None = None,
741
+ p1: int | None = None, *, k_offset: int = 0,
742
+ padding: PAD_TYPE = 'zeros', axis: int = -1) \
743
+ -> np.ndarray:
744
+ """Perform the short-time Fourier transform.
745
+
746
+ A two-dimensional matrix with ``p1-p0`` columns is calculated.
747
+ The `f_pts` rows represent value at the frequencies `f`. The q-th
748
+ column of the windowed FFT with the window `win` is centered at t[q].
749
+ The columns represent the values at the frequencies `f`.
750
+
751
+ Parameters
752
+ ----------
753
+ x
754
+ The input signal as real or complex valued array. For complex values, the
755
+ property `fft_mode` must be set to 'twosided' or 'centered'.
756
+ p0
757
+ The first element of the range of slices to calculate. If ``None``
758
+ then it is set to :attr:`p_min`, which is the smallest possible
759
+ slice.
760
+ p1
761
+ The end of the array. If ``None`` then `p_max(n)` is used.
762
+ k_offset
763
+ Index of first sample (t = 0) in `x`.
764
+ padding
765
+ Kind of values which are added, when the sliding window sticks out
766
+ on either the lower or upper end of the input `x`. Zeros are added
767
+ if the default 'zeros' is set. For 'edge' either the first or the
768
+ last value of `x` is used. 'even' pads by reflecting the
769
+ signal on the first or last sample and 'odd' additionally
770
+ multiplies it with -1.
771
+ axis
772
+ The axis of `x` over which to compute the STFT.
773
+ If not given, the last axis is used.
774
+
775
+ Returns
776
+ -------
777
+ S
778
+ A complex array is returned with the dimension always being larger
779
+ by one than of `x`. The last axis always represent the time slices
780
+ of the STFT. `axis` defines the frequency axis (default second to
781
+ last). E.g., for a one-dimensional `x`, a complex 2d array is
782
+ returned, with axis 0 representing frequency and axis 1 the time
783
+ slices.
784
+
785
+ See Also
786
+ --------
787
+ delta_f: Width of the frequency bins of the STFT.
788
+ delta_t: Time increment of STFT
789
+ f: Frequencies values of the STFT.
790
+ invertible: Check if STFT is invertible.
791
+ :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform.
792
+ p_range: Determine and validate slice index range.
793
+ stft_detrend: STFT with detrended segments.
794
+ t: Times of STFT for an input signal with `n` samples.
795
+ :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to.
796
+ """
797
+ return self.stft_detrend(x, None, p0, p1, k_offset=k_offset,
798
+ padding=padding, axis=axis)
799
+
800
+ def stft_detrend(self, x: np.ndarray,
801
+ detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None, # noqa: E501
802
+ p0: int | None = None, p1: int | None = None, *,
803
+ k_offset: int = 0, padding: PAD_TYPE = 'zeros',
804
+ axis: int = -1) \
805
+ -> np.ndarray:
806
+ """Short-time Fourier transform with a trend being subtracted from each
807
+ segment beforehand.
808
+
809
+ If `detr` is set to 'constant', the mean is subtracted, if set to
810
+ "linear", the linear trend is removed. This is achieved by calling
811
+ :func:`scipy.signal.detrend`. If `detr` is a function, `detr` is
812
+ applied to each segment.
813
+ All other parameters have the same meaning as in `~ShortTimeFFT.stft`.
814
+
815
+ Note that due to the detrending, the original signal cannot be
816
+ reconstructed by the `~ShortTimeFFT.istft`.
817
+
818
+ See Also
819
+ --------
820
+ invertible: Check if STFT is invertible.
821
+ :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform.
822
+ :meth:`~ShortTimeFFT.stft`: Short-time Fourier transform
823
+ (without detrending).
824
+ :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to.
825
+ """
826
+ if self.onesided_fft and np.iscomplexobj(x):
827
+ raise ValueError(f"Complex-valued `x` not allowed for {self.fft_mode=}'! "
828
+ "Set property `fft_mode` to 'twosided' or 'centered'.")
829
+ if isinstance(detr, str):
830
+ detr = partial(detrend, type=detr)
831
+ elif not (detr is None or callable(detr)):
832
+ raise ValueError(f"Parameter {detr=} is not a str, function or " +
833
+ "None!")
834
+ n = x.shape[axis]
835
+ if not (n >= (m2p := self.m_num-self.m_num_mid)):
836
+ e_str = f'{len(x)=}' if x.ndim == 1 else f'of {axis=} of {x.shape}'
837
+ raise ValueError(f"{e_str} must be >= ceil(m_num/2) = {m2p}!")
838
+
839
+ if x.ndim > 1: # motivated by the NumPy broadcasting mechanisms:
840
+ x = np.moveaxis(x, axis, -1)
841
+ # determine slice index range:
842
+ p0, p1 = self.p_range(n, p0, p1)
843
+ S_shape_1d = (self.f_pts, p1 - p0)
844
+ S_shape = x.shape[:-1] + S_shape_1d if x.ndim > 1 else S_shape_1d
845
+ S = np.zeros(S_shape, dtype=complex)
846
+ for p_, x_ in enumerate(self._x_slices(x, k_offset, p0, p1, padding)):
847
+ if detr is not None:
848
+ x_ = detr(x_)
849
+ S[..., :, p_] = self._fft_func(x_ * self.win.conj())
850
+ if x.ndim > 1:
851
+ return np.moveaxis(S, -2, axis if axis >= 0 else axis-1)
852
+ return S
853
+
854
+ def spectrogram(self, x: np.ndarray, y: np.ndarray | None = None,
855
+ detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None = None, # noqa: E501
856
+ *,
857
+ p0: int | None = None, p1: int | None = None,
858
+ k_offset: int = 0, padding: PAD_TYPE = 'zeros',
859
+ axis: int = -1) \
860
+ -> np.ndarray:
861
+ r"""Calculate spectrogram or cross-spectrogram.
862
+
863
+ The spectrogram is the absolute square of the STFT, i.e., it is
864
+ ``abs(S[q,p])**2`` for given ``S[q,p]`` and thus is always
865
+ non-negative.
866
+ For two STFTs ``Sx[q,p], Sy[q,p]``, the cross-spectrogram is defined
867
+ as ``Sx[q,p] * np.conj(Sy[q,p])`` and is complex-valued.
868
+ This is a convenience function for calling `~ShortTimeFFT.stft` /
869
+ `stft_detrend`, hence all parameters are discussed there. If `y` is not
870
+ ``None`` it needs to have the same shape as `x`.
871
+
872
+ Examples
873
+ --------
874
+ The following example shows the spectrogram of a square wave with
875
+ varying frequency :math:`f_i(t)` (marked by a green dashed line in the
876
+ plot) sampled with 20 Hz:
877
+
878
+ >>> import matplotlib.pyplot as plt
879
+ >>> import numpy as np
880
+ >>> from scipy.signal import square, ShortTimeFFT
881
+ >>> from scipy.signal.windows import gaussian
882
+ ...
883
+ >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal
884
+ >>> t_x = np.arange(N) * T_x # time indexes for signal
885
+ >>> f_i = 5e-3*(t_x - t_x[N // 3])**2 + 1 # varying frequency
886
+ >>> x = square(2*np.pi*np.cumsum(f_i)*T_x) # the signal
887
+
888
+ The utilized Gaussian window is 50 samples or 2.5 s long. The
889
+ parameter ``mfft=800`` (oversampling factor 16) and the `hop` interval
890
+ of 2 in `ShortTimeFFT` was chosen to produce a sufficient number of
891
+ points:
892
+
893
+ >>> g_std = 12 # standard deviation for Gaussian window in samples
894
+ >>> win = gaussian(50, std=g_std, sym=True) # symmetric Gaussian wind.
895
+ >>> SFT = ShortTimeFFT(win, hop=2, fs=1/T_x, mfft=800, scale_to='psd')
896
+ >>> Sx2 = SFT.spectrogram(x) # calculate absolute square of STFT
897
+
898
+ The plot's colormap is logarithmically scaled as the power spectral
899
+ density is in dB. The time extent of the signal `x` is marked by
900
+ vertical dashed lines and the shaded areas mark the presence of border
901
+ effects:
902
+
903
+ >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit
904
+ >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot
905
+ >>> ax1.set_title(rf"Spectrogram ({SFT.m_num*SFT.T:g}$\,s$ Gaussian " +
906
+ ... rf"window, $\sigma_t={g_std*SFT.T:g}\,$s)")
907
+ >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " +
908
+ ... rf"$\Delta t = {SFT.delta_t:g}\,$s)",
909
+ ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " +
910
+ ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)",
911
+ ... xlim=(t_lo, t_hi))
912
+ >>> Sx_dB = 10 * np.log10(np.fmax(Sx2, 1e-4)) # limit range to -40 dB
913
+ >>> im1 = ax1.imshow(Sx_dB, origin='lower', aspect='auto',
914
+ ... extent=SFT.extent(N), cmap='magma')
915
+ >>> ax1.plot(t_x, f_i, 'g--', alpha=.5, label='$f_i(t)$')
916
+ >>> fig1.colorbar(im1, label='Power Spectral Density ' +
917
+ ... r"$20\,\log_{10}|S_x(t, f)|$ in dB")
918
+ ...
919
+ >>> # Shade areas where window slices stick out to the side:
920
+ >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T),
921
+ ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]:
922
+ ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.3)
923
+ >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line
924
+ ... ax1.axvline(t_, color='c', linestyle='--', alpha=0.5)
925
+ >>> ax1.legend()
926
+ >>> fig1.tight_layout()
927
+ >>> plt.show()
928
+
929
+ The logarithmic scaling reveals the odd harmonics of the square wave,
930
+ which are reflected at the Nyquist frequency of 10 Hz. This aliasing
931
+ is also the main source of the noise artifacts in the plot.
932
+
933
+
934
+ See Also
935
+ --------
936
+ :meth:`~ShortTimeFFT.stft`: Perform the short-time Fourier transform.
937
+ stft_detrend: STFT with a trend subtracted from each segment.
938
+ :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to.
939
+ """
940
+ Sx = self.stft_detrend(x, detr, p0, p1, k_offset=k_offset,
941
+ padding=padding, axis=axis)
942
+ if y is None or y is x: # do spectrogram:
943
+ return Sx.real**2 + Sx.imag**2
944
+ # Cross-spectrogram:
945
+ Sy = self.stft_detrend(y, detr, p0, p1, k_offset=k_offset,
946
+ padding=padding, axis=axis)
947
+ return Sx * Sy.conj()
948
+
949
+ @property
950
+ def dual_win(self) -> np.ndarray:
951
+ """Canonical dual window.
952
+
953
+ A STFT can be interpreted as the input signal being expressed as a
954
+ weighted sum of modulated and time-shifted dual windows. Note that for
955
+ a given window there exist many dual windows. The canonical window is
956
+ the one with the minimal energy (i.e., :math:`L_2` norm).
957
+
958
+ `dual_win` has same length as `win`, namely `m_num` samples.
959
+
960
+ If the dual window cannot be calculated a ``ValueError`` is raised.
961
+ This attribute is read only and calculated lazily.
962
+
963
+ See Also
964
+ --------
965
+ dual_win: Canonical dual window.
966
+ m_num: Number of samples in window `win`.
967
+ win: Window function as real- or complex-valued 1d array.
968
+ ShortTimeFFT: Class this property belongs to.
969
+ """
970
+ if self._dual_win is None:
971
+ self._dual_win = _calc_dual_canonical_window(self.win, self.hop)
972
+ return self._dual_win
973
+
974
+ @property
975
+ def invertible(self) -> bool:
976
+ """Check if STFT is invertible.
977
+
978
+ This is achieved by trying to calculate the canonical dual window.
979
+
980
+ See Also
981
+ --------
982
+ :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform.
983
+ m_num: Number of samples in window `win` and `dual_win`.
984
+ dual_win: Canonical dual window.
985
+ win: Window for STFT.
986
+ ShortTimeFFT: Class this property belongs to.
987
+ """
988
+ try:
989
+ return len(self.dual_win) > 0 # call self.dual_win()
990
+ except ValueError:
991
+ return False
992
+
993
    def istft(self, S: np.ndarray, k0: int = 0, k1: int | None = None, *,
              f_axis: int = -2, t_axis: int = -1) \
            -> np.ndarray:
        """Inverse short-time Fourier transform.

        It returns an array of dimension ``S.ndim - 1`` which is real
        if `onesided_fft` is set, else complex. If the STFT is not
        `invertible`, or the parameters are out of bounds a ``ValueError`` is
        raised.

        Parameters
        ----------
        S
            A complex valued array where `f_axis` denotes the frequency
            values and the `t-axis` dimension the temporal values of the
            STFT values.
        k0, k1
            The start and the end index of the reconstructed signal. The
            default (``k0 = 0``, ``k1 = None``) assumes that the maximum length
            signal should be reconstructed.
        f_axis, t_axis
            The axes in `S` denoting the frequency and the time dimension.

        Notes
        -----
        It is required that `S` has `f_pts` entries along the `f_axis`. For
        the `t_axis` it is assumed that the first entry corresponds to
        `p_min` * `delta_t` (being <= 0). The length of `t_axis` needs to be
        compatible with `k1`. I.e., ``S.shape[t_axis] >= self.p_max(k1)`` must
        hold, if `k1` is not ``None``. Else `k1` is set to `k_max` with::

            q_max = S.shape[t_axis] + self.p_min
            k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid

        The :ref:`tutorial_stft` section of the :ref:`user_guide` discussed the
        slicing behavior by means of an example.

        See Also
        --------
        invertible: Check if STFT is invertible.
        :meth:`~ShortTimeFFT.stft`: Perform Short-time Fourier transform.
        :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to.
        """
        # --- Parameter validation -----------------------------------------
        if f_axis == t_axis:
            raise ValueError(f"{f_axis=} may not be equal to {t_axis=}!")
        if S.shape[f_axis] != self.f_pts:
            raise ValueError(f"{S.shape[f_axis]=} must be equal to " +
                             f"{self.f_pts=} ({S.shape=})!")
        n_min = self.m_num-self.m_num_mid  # minimum signal length
        if not (S.shape[t_axis] >= (q_num := self.p_num(n_min))):
            raise ValueError(f"{S.shape[t_axis]=} needs to have at least " +
                             f"{q_num} slices ({S.shape=})!")
        # Bring frequency and time axes to the last two positions, so the
        # remaining code can index with fixed axes -2 (freq.) and -1 (time):
        if t_axis != S.ndim - 1 or f_axis != S.ndim - 2:
            t_axis = S.ndim + t_axis if t_axis < 0 else t_axis
            f_axis = S.ndim + f_axis if f_axis < 0 else f_axis
            S = np.moveaxis(S, (f_axis, t_axis), (-2, -1))

        # Largest slice index (exclusive) and sample index reconstructable
        # from the given number of time slices:
        q_max = S.shape[-1] + self.p_min
        k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid

        k1 = k_max if k1 is None else k1
        if not (self.k_min <= k0 < k1 <= k_max):
            raise ValueError(f"({self.k_min=}) <= ({k0=}) < ({k1=}) <= " +
                             f"({k_max=}) is false!")
        if not (num_pts := k1 - k0) >= n_min:
            raise ValueError(f"({k1=}) - ({k0=}) = {num_pts} has to be at " +
                             f"least the half the window length {n_min}!")

        # --- Overlap-add reconstruction -----------------------------------
        # q0, q1: range of slice indexes contributing to samples [k0, k1):
        q0 = (k0 // self.hop + self.p_min if k0 >= 0 else  # p_min always <= 0
              k0 // self.hop)
        q1 = min(self.p_max(k1), q_max)
        # Slice-aligned sample indexes enclosing [k0, k1) — determines the
        # size of the accumulation buffer (trimmed to k1-k0 at the end):
        k_q0, k_q1 = self.nearest_k_p(k0), self.nearest_k_p(k1, left=False)
        n_pts = k_q1 - k_q0 + self.m_num - self.m_num_mid
        x = np.zeros(S.shape[:-2] + (n_pts,),
                     dtype=float if self.onesided_fft else complex)
        for q_ in range(q0, q1):
            # Inverse FFT of one slice, weighted with the dual window:
            xs = self._ifft_func(S[..., :, q_ - self.p_min]) * self.dual_win
            i0 = q_ * self.hop - self.m_num_mid  # start sample of this slice
            i1 = min(i0 + self.m_num, n_pts+k0)
            j0, j1 = 0, i1 - i0
            if i0 < k0:  # xs sticks out to the left on x:
                j0 += k0 - i0
                i0 = k0
            x[..., i0-k0:i1-k0] += xs[..., j0:j1]  # overlap-add contribution
        x = x[..., :k1-k0]
        if x.ndim > 1:
            # Undo the axis move; the time axis replaces where the frequency
            # axis was (one dimension was consumed by the inverse transform):
            x = np.moveaxis(x, -1, f_axis if f_axis < x.ndim else t_axis)
        return x
1081
+
1082
+ @property
1083
+ def fac_magnitude(self) -> float:
1084
+ """Factor to multiply the STFT values by to scale each frequency slice
1085
+ to a magnitude spectrum.
1086
+
1087
+ It is 1 if attribute ``scaling == 'magnitude'``.
1088
+ The window can be scaled to a magnitude spectrum by using the method
1089
+ `scale_to`.
1090
+
1091
+ See Also
1092
+ --------
1093
+ fac_psd: Scaling factor for to a power spectral density spectrum.
1094
+ scale_to: Scale window to obtain 'magnitude' or 'psd' scaling.
1095
+ scaling: Normalization applied to the window function.
1096
+ ShortTimeFFT: Class this property belongs to.
1097
+ """
1098
+ if self.scaling == 'magnitude':
1099
+ return 1
1100
+ if self._fac_mag is None:
1101
+ self._fac_mag = 1 / abs(sum(self.win))
1102
+ return self._fac_mag
1103
+
1104
+ @property
1105
+ def fac_psd(self) -> float:
1106
+ """Factor to multiply the STFT values by to scale each frequency slice
1107
+ to a power spectral density (PSD).
1108
+
1109
+ It is 1 if attribute ``scaling == 'psd'``.
1110
+ The window can be scaled to a psd spectrum by using the method
1111
+ `scale_to`.
1112
+
1113
+ See Also
1114
+ --------
1115
+ fac_magnitude: Scaling factor for to a magnitude spectrum.
1116
+ scale_to: Scale window to obtain 'magnitude' or 'psd' scaling.
1117
+ scaling: Normalization applied to the window function.
1118
+ ShortTimeFFT: Class this property belongs to.
1119
+ """
1120
+ if self.scaling == 'psd':
1121
+ return 1
1122
+ if self._fac_psd is None:
1123
+ self._fac_psd = 1 / np.sqrt(
1124
+ sum(self.win.real**2+self.win.imag**2) / self.T)
1125
+ return self._fac_psd
1126
+
1127
+ @property
1128
+ def m_num(self) -> int:
1129
+ """Number of samples in window `win`.
1130
+
1131
+ Note that the FFT can be oversampled by zero-padding. This is achieved
1132
+ by setting the `mfft` property.
1133
+
1134
+ See Also
1135
+ --------
1136
+ m_num_mid: Center index of window `win`.
1137
+ mfft: Length of input for the FFT used - may be larger than `m_num`.
1138
+ hop: Time increment in signal samples for sliding window.
1139
+ win: Window function as real- or complex-valued 1d array.
1140
+ ShortTimeFFT: Class this property belongs to.
1141
+ """
1142
+ return len(self.win)
1143
+
1144
+ @property
1145
+ def m_num_mid(self) -> int:
1146
+ """Center index of window `win`.
1147
+
1148
+ For odd `m_num`, ``(m_num - 1) / 2`` is returned and
1149
+ for even `m_num` (per definition) ``m_num / 2`` is returned.
1150
+
1151
+ See Also
1152
+ --------
1153
+ m_num: Number of samples in window `win`.
1154
+ mfft: Length of input for the FFT used - may be larger than `m_num`.
1155
+ hop: ime increment in signal samples for sliding window.
1156
+ win: Window function as real- or complex-valued 1d array.
1157
+ ShortTimeFFT: Class this property belongs to.
1158
+ """
1159
+ return self.m_num // 2
1160
+
1161
    @cache
    def _pre_padding(self) -> tuple[int, int]:
        """Smallest signal index and slice index due to padding.

        Since, per convention, for time t=0, n,q is zero, the returned values
        are negative or zero.
        """
        # Sample-wise window power; a zero entry means that window sample
        # never contributes, so pure-zero overlaps can be skipped:
        w2 = self.win.real**2 + self.win.imag**2
        # move window to the left until the overlap with t >= 0 vanishes:
        n0 = -self.m_num_mid  # start sample of slice p = 0
        for q_, n_ in enumerate(range(n0, n0-self.m_num-1, -self.hop)):
            n_next = n_ - self.hop  # start sample of the next slice left of n_
            # Stop when the next slice either lies completely left of t=0 or
            # only its zero-valued window samples overlap t >= 0.  Note that
            # n_next < 0, so w2[n_next:] are the window's trailing samples:
            if n_next + self.m_num <= 0 or all(w2[n_next:] == 0):
                return n_, -q_  # q_ counts steps to the left, hence negated
        raise RuntimeError("This is code line should not have been reached!")
        # If this case is reached, it probably means the first slice should be
        # returned, i.e.: return n0, 0
1178
+
1179
+ @property
1180
+ def k_min(self) -> int:
1181
+ """The smallest possible signal index of the STFT.
1182
+
1183
+ `k_min` is the index of the left-most non-zero value of the lowest
1184
+ slice `p_min`. Since the zeroth slice is centered over the zeroth
1185
+ sample of the input signal, `k_min` is never positive.
1186
+ A detailed example is provided in the :ref:`tutorial_stft_sliding_win`
1187
+ section of the :ref:`user_guide`.
1188
+
1189
+ See Also
1190
+ --------
1191
+ k_max: First sample index after signal end not touched by a time slice.
1192
+ lower_border_end: Where pre-padding effects end.
1193
+ p_min: The smallest possible slice index.
1194
+ p_max: Index of first non-overlapping upper time slice.
1195
+ p_num: Number of time slices, i.e., `p_max` - `p_min`.
1196
+ p_range: Determine and validate slice index range.
1197
+ upper_border_begin: Where post-padding effects start.
1198
+ ShortTimeFFT: Class this property belongs to.
1199
+ """
1200
+ return self._pre_padding()[0]
1201
+
1202
+ @property
1203
+ def p_min(self) -> int:
1204
+ """The smallest possible slice index.
1205
+
1206
+ `p_min` is the index of the left-most slice, where the window still
1207
+ sticks into the signal, i.e., has non-zero part for t >= 0.
1208
+ `k_min` is the smallest index where the window function of the slice
1209
+ `p_min` is non-zero.
1210
+
1211
+ Since, per convention the zeroth slice is centered at t=0,
1212
+ `p_min` <= 0 always holds.
1213
+ A detailed example is provided in the :ref:`tutorial_stft_sliding_win`
1214
+ section of the :ref:`user_guide`.
1215
+
1216
+ See Also
1217
+ --------
1218
+ k_min: The smallest possible signal index.
1219
+ k_max: First sample index after signal end not touched by a time slice.
1220
+ p_max: Index of first non-overlapping upper time slice.
1221
+ p_num: Number of time slices, i.e., `p_max` - `p_min`.
1222
+ p_range: Determine and validate slice index range.
1223
+ ShortTimeFFT: Class this property belongs to.
1224
+ """
1225
+ return self._pre_padding()[1]
1226
+
1227
    @lru_cache(maxsize=256)
    def _post_padding(self, n: int) -> tuple[int, int]:
        """Largest signal index and slice index due to padding."""
        # Sample-wise window power; a zero entry means that window sample
        # never contributes, so pure-zero overlaps can be skipped:
        w2 = self.win.real**2 + self.win.imag**2
        # move window to the right until the overlap for t < t[n] vanishes:
        q1 = n // self.hop  # last slice index with t[p1] <= t[n]
        k1 = q1 * self.hop - self.m_num_mid  # start sample of slice q1
        for q_, k_ in enumerate(range(k1, n+self.m_num, self.hop), start=q1):
            n_next = k_ + self.hop  # start sample of the following slice
            # Stop when the following slice either starts past the signal end
            # or only its zero-valued window samples overlap t < t[n]:
            if n_next >= n or all(w2[:n-n_next] == 0):
                return k_ + self.m_num, q_ + 1
        raise RuntimeError("This is code line should not have been reached!")
        # If this case is reached, it probably means the last slice should be
        # returned, i.e.: return k1 + self.m_num - self.m_num_mid, q1 + 1
1241
+
1242
+ def k_max(self, n: int) -> int:
1243
+ """First sample index after signal end not touched by a time slice.
1244
+
1245
+ `k_max` - 1 is the largest sample index of the slice `p_max` for a
1246
+ given input signal of `n` samples.
1247
+ A detailed example is provided in the :ref:`tutorial_stft_sliding_win`
1248
+ section of the :ref:`user_guide`.
1249
+
1250
+ See Also
1251
+ --------
1252
+ k_min: The smallest possible signal index.
1253
+ p_min: The smallest possible slice index.
1254
+ p_max: Index of first non-overlapping upper time slice.
1255
+ p_num: Number of time slices, i.e., `p_max` - `p_min`.
1256
+ p_range: Determine and validate slice index range.
1257
+ ShortTimeFFT: Class this method belongs to.
1258
+ """
1259
+ return self._post_padding(n)[0]
1260
+
1261
+ def p_max(self, n: int) -> int:
1262
+ """Index of first non-overlapping upper time slice for `n` sample
1263
+ input.
1264
+
1265
+ Note that center point t[p_max] = (p_max(n)-1) * `delta_t` is typically
1266
+ larger than last time index t[n-1] == (`n`-1) * `T`. The upper border
1267
+ of samples indexes covered by the window slices is given by `k_max`.
1268
+ Furthermore, `p_max` does not denote the number of slices `p_num` since
1269
+ `p_min` is typically less than zero.
1270
+ A detailed example is provided in the :ref:`tutorial_stft_sliding_win`
1271
+ section of the :ref:`user_guide`.
1272
+
1273
+ See Also
1274
+ --------
1275
+ k_min: The smallest possible signal index.
1276
+ k_max: First sample index after signal end not touched by a time slice.
1277
+ p_min: The smallest possible slice index.
1278
+ p_num: Number of time slices, i.e., `p_max` - `p_min`.
1279
+ p_range: Determine and validate slice index range.
1280
+ ShortTimeFFT: Class this method belongs to.
1281
+ """
1282
+ return self._post_padding(n)[1]
1283
+
1284
+ def p_num(self, n: int) -> int:
1285
+ """Number of time slices for an input signal with `n` samples.
1286
+
1287
+ It is given by `p_num` = `p_max` - `p_min` with `p_min` typically
1288
+ being negative.
1289
+ A detailed example is provided in the :ref:`tutorial_stft_sliding_win`
1290
+ section of the :ref:`user_guide`.
1291
+
1292
+ See Also
1293
+ --------
1294
+ k_min: The smallest possible signal index.
1295
+ k_max: First sample index after signal end not touched by a time slice.
1296
+ lower_border_end: Where pre-padding effects end.
1297
+ p_min: The smallest possible slice index.
1298
+ p_max: Index of first non-overlapping upper time slice.
1299
+ p_range: Determine and validate slice index range.
1300
+ upper_border_begin: Where post-padding effects start.
1301
+ ShortTimeFFT: Class this method belongs to.
1302
+ """
1303
+ return self.p_max(n) - self.p_min
1304
+
1305
    @property
    def lower_border_end(self) -> tuple[int, int]:
        """First signal index and first slice index unaffected by pre-padding.

        Describes the point where the window does not stick out to the left
        of the signal domain.
        A detailed example is provided in the :ref:`tutorial_stft_sliding_win`
        section of the :ref:`user_guide`.

        See Also
        --------
        k_min: The smallest possible signal index.
        k_max: First sample index after signal end not touched by a time slice.
        p_min: The smallest possible slice index.
        p_max: Index of first non-overlapping upper time slice.
        p_num: Number of time slices, i.e., `p_max` - `p_min`.
        p_range: Determine and validate slice index range.
        upper_border_begin: Where post-padding effects start.
        ShortTimeFFT: Class this property belongs to.
        """
        # not using @cache decorator due to MyPy limitations
        if self._lower_border_end is not None:
            return self._lower_border_end  # memoized result

        # first non-zero element in self.win:
        m0 = np.flatnonzero(self.win.real**2 + self.win.imag**2)[0]

        # move window to the right until does not stick out to the left:
        k0 = -self.m_num_mid + m0  # first non-zero sample of slice p = 0
        for q_, k_ in enumerate(range(k0, self.hop + 1, self.hop)):
            if k_ + self.hop >= 0:  # next entry does not stick out anymore
                self._lower_border_end = (k_ + self.m_num, q_ + 1)
                return self._lower_border_end
        # Loop did not run (k0 > hop + 1), i.e., the zeroth slice already
        # does not stick out to the left:
        self._lower_border_end = (0, max(self.p_min, 0))  # ends at first slice
        return self._lower_border_end
1341
+
1342
    @lru_cache(maxsize=256)
    def upper_border_begin(self, n: int) -> tuple[int, int]:
        """First signal index and first slice index affected by post-padding.

        Describes the point where the window does begin stick out to the right
        of the signal domain.
        A detailed example is given :ref:`tutorial_stft_sliding_win` section
        of the :ref:`user_guide`.

        See Also
        --------
        k_min: The smallest possible signal index.
        k_max: First sample index after signal end not touched by a time slice.
        lower_border_end: Where pre-padding effects end.
        p_min: The smallest possible slice index.
        p_max: Index of first non-overlapping upper time slice.
        p_num: Number of time slices, i.e., `p_max` - `p_min`.
        p_range: Determine and validate slice index range.
        ShortTimeFFT: Class this method belongs to.
        """
        # Sample-wise window power; zero entries mark window samples that
        # never contribute, so they may stick out without border effects:
        w2 = self.win.real**2 + self.win.imag**2
        q2 = n // self.hop + 1  # first t[q] >= t[n]
        q1 = max((n-self.m_num) // self.hop - 1, -1)  # lower search bound
        # move window left until does not stick out to the right:
        for q_ in range(q2, q1, -1):
            k_ = q_ * self.hop + (self.m_num - self.m_num_mid)  # slice end
            # Slice q_ fits, i.e., ends inside the signal or only its
            # zero-valued window samples stick out, so q_ + 1 is the first
            # slice affected by post-padding:
            if k_ < n or all(w2[n-k_:] == 0):
                return (q_ + 1) * self.hop - self.m_num_mid, q_ + 1
        return 0, 0  # border starts at first slice
1371
+
1372
+ @property
1373
+ def delta_t(self) -> float:
1374
+ """Time increment of STFT.
1375
+
1376
+ The time increment `delta_t` = `T` * `hop` represents the sample
1377
+ increment `hop` converted to time based on the sampling interval `T`.
1378
+
1379
+ See Also
1380
+ --------
1381
+ delta_f: Width of the frequency bins of the STFT.
1382
+ hop: Hop size in signal samples for sliding window.
1383
+ t: Times of STFT for an input signal with `n` samples.
1384
+ T: Sampling interval of input signal and window `win`.
1385
+ ShortTimeFFT: Class this property belongs to
1386
+ """
1387
+ return self.T * self.hop
1388
+
1389
+ def p_range(self, n: int, p0: int | None = None,
1390
+ p1: int | None = None) -> tuple[int, int]:
1391
+ """Determine and validate slice index range.
1392
+
1393
+ Parameters
1394
+ ----------
1395
+ n : int
1396
+ Number of samples of input signal, assuming t[0] = 0.
1397
+ p0 : int | None
1398
+ First slice index. If 0 then the first slice is centered at t = 0.
1399
+ If ``None`` then `p_min` is used. Note that p0 may be < 0 if
1400
+ slices are left of t = 0.
1401
+ p1 : int | None
1402
+ End of interval (last value is p1-1).
1403
+ If ``None`` then `p_max(n)` is used.
1404
+
1405
+
1406
+ Returns
1407
+ -------
1408
+ p0_ : int
1409
+ The fist slice index
1410
+ p1_ : int
1411
+ End of interval (last value is p1-1).
1412
+
1413
+ Notes
1414
+ -----
1415
+ A ``ValueError`` is raised if ``p_min <= p0 < p1 <= p_max(n)`` does not
1416
+ hold.
1417
+
1418
+ See Also
1419
+ --------
1420
+ k_min: The smallest possible signal index.
1421
+ k_max: First sample index after signal end not touched by a time slice.
1422
+ lower_border_end: Where pre-padding effects end.
1423
+ p_min: The smallest possible slice index.
1424
+ p_max: Index of first non-overlapping upper time slice.
1425
+ p_num: Number of time slices, i.e., `p_max` - `p_min`.
1426
+ upper_border_begin: Where post-padding effects start.
1427
+ ShortTimeFFT: Class this property belongs to.
1428
+ """
1429
+ p_max = self.p_max(n) # shorthand
1430
+ p0_ = self.p_min if p0 is None else p0
1431
+ p1_ = p_max if p1 is None else p1
1432
+ if not (self.p_min <= p0_ < p1_ <= p_max):
1433
+ raise ValueError(f"Invalid Parameter {p0=}, {p1=}, i.e., " +
1434
+ f"{self.p_min=} <= p0 < p1 <= {p_max=} " +
1435
+ f"does not hold for signal length {n=}!")
1436
+ return p0_, p1_
1437
+
1438
+ @lru_cache(maxsize=1)
1439
+ def t(self, n: int, p0: int | None = None, p1: int | None = None,
1440
+ k_offset: int = 0) -> np.ndarray:
1441
+ """Times of STFT for an input signal with `n` samples.
1442
+
1443
+ Returns a 1d array with times of the `~ShortTimeFFT.stft` values with
1444
+ the same parametrization. Note that the slices are
1445
+ ``delta_t = hop * T`` time units apart.
1446
+
1447
+ Parameters
1448
+ ----------
1449
+ n
1450
+ Number of sample of the input signal.
1451
+ p0
1452
+ The first element of the range of slices to calculate. If ``None``
1453
+ then it is set to :attr:`p_min`, which is the smallest possible
1454
+ slice.
1455
+ p1
1456
+ The end of the array. If ``None`` then `p_max(n)` is used.
1457
+ k_offset
1458
+ Index of first sample (t = 0) in `x`.
1459
+
1460
+
1461
+ See Also
1462
+ --------
1463
+ delta_t: Time increment of STFT (``hop*T``)
1464
+ hop: Time increment in signal samples for sliding window.
1465
+ nearest_k_p: Nearest sample index k_p for which t[k_p] == t[p] holds.
1466
+ T: Sampling interval of input signal and of the window (``1/fs``).
1467
+ fs: Sampling frequency (being ``1/T``)
1468
+ ShortTimeFFT: Class this method belongs to.
1469
+ """
1470
+ p0, p1 = self.p_range(n, p0, p1)
1471
+ return np.arange(p0, p1) * self.delta_t + k_offset * self.T
1472
+
1473
+ def nearest_k_p(self, k: int, left: bool = True) -> int:
1474
+ """Return nearest sample index k_p for which t[k_p] == t[p] holds.
1475
+
1476
+ The nearest next smaller time sample p (where t[p] is the center
1477
+ position of the window of the p-th slice) is p_k = k // `hop`.
1478
+ If `hop` is a divisor of `k` than `k` is returned.
1479
+ If `left` is set than p_k * `hop` is returned else (p_k+1) * `hop`.
1480
+
1481
+ This method can be used to slice an input signal into chunks for
1482
+ calculating the STFT and iSTFT incrementally.
1483
+
1484
+ See Also
1485
+ --------
1486
+ delta_t: Time increment of STFT (``hop*T``)
1487
+ hop: Time increment in signal samples for sliding window.
1488
+ T: Sampling interval of input signal and of the window (``1/fs``).
1489
+ fs: Sampling frequency (being ``1/T``)
1490
+ t: Times of STFT for an input signal with `n` samples.
1491
+ ShortTimeFFT: Class this method belongs to.
1492
+ """
1493
+ p_q, remainder = divmod(k, self.hop)
1494
+ if remainder == 0:
1495
+ return k
1496
+ return p_q * self.hop if left else (p_q + 1) * self.hop
1497
+
1498
+ @property
1499
+ def delta_f(self) -> float:
1500
+ """Width of the frequency bins of the STFT.
1501
+
1502
+ Return the frequency interval `delta_f` = 1 / (`mfft` * `T`).
1503
+
1504
+ See Also
1505
+ --------
1506
+ delta_t: Time increment of STFT.
1507
+ f_pts: Number of points along the frequency axis.
1508
+ f: Frequencies values of the STFT.
1509
+ mfft: Length of the input for FFT used.
1510
+ T: Sampling interval.
1511
+ t: Times of STFT for an input signal with `n` samples.
1512
+ ShortTimeFFT: Class this property belongs to.
1513
+ """
1514
+ return 1 / (self.mfft * self.T)
1515
+
1516
+ @property
1517
+ def f_pts(self) -> int:
1518
+ """Number of points along the frequency axis.
1519
+
1520
+ See Also
1521
+ --------
1522
+ delta_f: Width of the frequency bins of the STFT.
1523
+ f: Frequencies values of the STFT.
1524
+ mfft: Length of the input for FFT used.
1525
+ ShortTimeFFT: Class this property belongs to.
1526
+ """
1527
+ return self.mfft // 2 + 1 if self.onesided_fft else self.mfft
1528
+
1529
+ @property
1530
+ def onesided_fft(self) -> bool:
1531
+ """Return True if a one-sided FFT is used.
1532
+
1533
+ Returns ``True`` if `fft_mode` is either 'onesided' or 'onesided2X'.
1534
+
1535
+ See Also
1536
+ --------
1537
+ fft_mode: Utilized FFT ('twosided', 'centered', 'onesided' or
1538
+ 'onesided2X')
1539
+ ShortTimeFFT: Class this property belongs to.
1540
+ """
1541
+ return self.fft_mode in {'onesided', 'onesided2X'}
1542
+
1543
+ @property
1544
+ def f(self) -> np.ndarray:
1545
+ """Frequencies values of the STFT.
1546
+
1547
+ A 1d array of length `f_pts` with `delta_f` spaced entries is returned.
1548
+
1549
+ See Also
1550
+ --------
1551
+ delta_f: Width of the frequency bins of the STFT.
1552
+ f_pts: Number of points along the frequency axis.
1553
+ mfft: Length of the input for FFT used.
1554
+ ShortTimeFFT: Class this property belongs to.
1555
+ """
1556
+ if self.fft_mode in {'onesided', 'onesided2X'}:
1557
+ return fft_lib.rfftfreq(self.mfft, self.T)
1558
+ elif self.fft_mode == 'twosided':
1559
+ return fft_lib.fftfreq(self.mfft, self.T)
1560
+ elif self.fft_mode == 'centered':
1561
+ return fft_lib.fftshift(fft_lib.fftfreq(self.mfft, self.T))
1562
+ # This should never happen but makes the Linters happy:
1563
+ fft_modes = get_args(FFT_MODE_TYPE)
1564
+ raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!")
1565
+
1566
    def _fft_func(self, x: np.ndarray) -> np.ndarray:
        """FFT based on the `fft_mode`, `mfft`, `scaling` and `phase_shift`
        attributes.

        For multidimensional arrays the transformation is carried out on the
        last axis.
        """
        if self.phase_shift is not None:
            if x.shape[-1] < self.mfft:  # zero pad if needed
                z_shape = list(x.shape)
                z_shape[-1] = self.mfft - x.shape[-1]
                x = np.hstack((x, np.zeros(z_shape, dtype=x.dtype)))
            # Circularly shift the input so the requested phase reference
            # sample becomes the first FFT input:
            p_s = (self.phase_shift + self.m_num_mid) % self.m_num
            x = np.roll(x, -p_s, axis=-1)

        if self.fft_mode == 'twosided':
            return fft_lib.fft(x, n=self.mfft, axis=-1)
        if self.fft_mode == 'centered':
            # zero frequency is moved to the center of the spectrum:
            return fft_lib.fftshift(fft_lib.fft(x, self.mfft, axis=-1), axes=-1)
        if self.fft_mode == 'onesided':
            return fft_lib.rfft(x, n=self.mfft, axis=-1)
        if self.fft_mode == 'onesided2X':
            X = fft_lib.rfft(x, n=self.mfft, axis=-1)
            # Either squared magnitude (psd) or magnitude is doubled:
            fac = np.sqrt(2) if self.scaling == 'psd' else 2
            # For even input length, the last entry is unpaired:
            X[..., 1: -1 if self.mfft % 2 == 0 else None] *= fac
            return X
        # This should never happen but makes the Linter happy:
        fft_modes = get_args(FFT_MODE_TYPE)
        raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!")
1597
+
1598
    def _ifft_func(self, X: np.ndarray) -> np.ndarray:
        """Inverse to `_fft_func`.

        Returned is an array of length `m_num`. If the FFT is `onesided`
        then a float array is returned else a complex array is returned.
        For multidimensional arrays the transformation is carried out on the
        last axis.
        """
        if self.fft_mode == 'twosided':
            x = fft_lib.ifft(X, n=self.mfft, axis=-1)
        elif self.fft_mode == 'centered':
            # undo the fftshift of `_fft_func` before the inverse transform:
            x = fft_lib.ifft(fft_lib.ifftshift(X, axes=-1), n=self.mfft, axis=-1)
        elif self.fft_mode == 'onesided':
            x = fft_lib.irfft(X, n=self.mfft, axis=-1)
        elif self.fft_mode == 'onesided2X':
            Xc = X.copy()  # we do not want to modify function parameters
            fac = np.sqrt(2) if self.scaling == 'psd' else 2
            # For even length X the last value is not paired with a negative
            # value on the two-sided FFT:
            q1 = -1 if self.mfft % 2 == 0 else None
            Xc[..., 1:q1] /= fac  # undo the doubling applied in `_fft_func`
            x = fft_lib.irfft(Xc, n=self.mfft, axis=-1)
        else:  # This should never happen but makes the Linter happy:
            error_str = f"{self.fft_mode=} not in {get_args(FFT_MODE_TYPE)}!"
            raise RuntimeError(error_str)

        if self.phase_shift is None:
            return x[..., :self.m_num]
        # Undo the circular shift applied in `_fft_func` before trimming:
        p_s = (self.phase_shift + self.m_num_mid) % self.m_num
        return np.roll(x, p_s, axis=-1)[..., :self.m_num]
1628
+
1629
+ def extent(self, n: int, axes_seq: Literal['tf', 'ft'] = 'tf',
1630
+ center_bins: bool = False) -> tuple[float, float, float, float]:
1631
+ """Return minimum and maximum values time-frequency values.
1632
+
1633
+ A tuple with four floats ``(t0, t1, f0, f1)`` for 'tf' and
1634
+ ``(f0, f1, t0, t1)`` for 'ft' is returned describing the corners
1635
+ of the time-frequency domain of the `~ShortTimeFFT.stft`.
1636
+ That tuple can be passed to `matplotlib.pyplot.imshow` as a parameter
1637
+ with the same name.
1638
+
1639
+ Parameters
1640
+ ----------
1641
+ n : int
1642
+ Number of samples in input signal.
1643
+ axes_seq : {'tf', 'ft'}
1644
+ Return time extent first and then frequency extent or vice-versa.
1645
+ center_bins: bool
1646
+ If set (default ``False``), the values of the time slots and
1647
+ frequency bins are moved from the side the middle. This is useful,
1648
+ when plotting the `~ShortTimeFFT.stft` values as step functions,
1649
+ i.e., with no interpolation.
1650
+
1651
+ See Also
1652
+ --------
1653
+ :func:`matplotlib.pyplot.imshow`: Display data as an image.
1654
+ :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to.
1655
+
1656
+ Examples
1657
+ --------
1658
+ The following two plots illustrate the effect of the parameter `center_bins`:
1659
+ The grid lines represent the three time and the four frequency values of the
1660
+ STFT.
1661
+ The left plot, where ``(t0, t1, f0, f1) = (0, 3, 0, 4)`` is passed as parameter
1662
+ ``extent`` to `~matplotlib.pyplot.imshow`, shows the standard behavior of the
1663
+ time and frequency values being at the lower edge of the corrsponding bin.
1664
+ The right plot, with ``(t0, t1, f0, f1) = (-0.5, 2.5, -0.5, 3.5)``, shows that
1665
+ the bins are centered over the respective values when passing
1666
+ ``center_bins=True``.
1667
+
1668
+ >>> import matplotlib.pyplot as plt
1669
+ >>> import numpy as np
1670
+ >>> from scipy.signal import ShortTimeFFT
1671
+ ...
1672
+ >>> n, m = 12, 6
1673
+ >>> SFT = ShortTimeFFT.from_window('hann', fs=m, nperseg=m, noverlap=0)
1674
+ >>> Sxx = SFT.stft(np.cos(np.arange(n))) # produces a colorful plot
1675
+ ...
1676
+ >>> fig, axx = plt.subplots(1, 2, tight_layout=True, figsize=(6., 4.))
1677
+ >>> for ax_, center_bins in zip(axx, (False, True)):
1678
+ ... ax_.imshow(abs(Sxx), origin='lower', interpolation=None, aspect='equal',
1679
+ ... cmap='viridis', extent=SFT.extent(n, 'tf', center_bins))
1680
+ ... ax_.set_title(f"{center_bins=}")
1681
+ ... ax_.set_xlabel(f"Time ({SFT.p_num(n)} points, Δt={SFT.delta_t})")
1682
+ ... ax_.set_ylabel(f"Frequency ({SFT.f_pts} points, Δf={SFT.delta_f})")
1683
+ ... ax_.set_xticks(SFT.t(n)) # vertical grid line are timestamps
1684
+ ... ax_.set_yticks(SFT.f) # horizontal grid line are frequency values
1685
+ ... ax_.grid(True)
1686
+ >>> plt.show()
1687
+
1688
+ Note that the step-like behavior with the constant colors is caused by passing
1689
+ ``interpolation=None`` to `~matplotlib.pyplot.imshow`.
1690
+ """
1691
+ if axes_seq not in ('tf', 'ft'):
1692
+ raise ValueError(f"Parameter {axes_seq=} not in ['tf', 'ft']!")
1693
+
1694
+ if self.onesided_fft:
1695
+ q0, q1 = 0, self.f_pts
1696
+ elif self.fft_mode == 'centered':
1697
+ q0 = -(self.mfft // 2)
1698
+ q1 = self.mfft // 2 if self.mfft % 2 == 0 else self.mfft // 2 + 1
1699
+ else:
1700
+ raise ValueError(f"Attribute fft_mode={self.fft_mode} must be " +
1701
+ "in ['centered', 'onesided', 'onesided2X']")
1702
+
1703
+ p0, p1 = self.p_min, self.p_max(n) # shorthand
1704
+ if center_bins:
1705
+ t0, t1 = self.delta_t * (p0 - 0.5), self.delta_t * (p1 - 0.5)
1706
+ f0, f1 = self.delta_f * (q0 - 0.5), self.delta_f * (q1 - 0.5)
1707
+ else:
1708
+ t0, t1 = self.delta_t * p0, self.delta_t * p1
1709
+ f0, f1 = self.delta_f * q0, self.delta_f * q1
1710
+ return (t0, t1, f0, f1) if axes_seq == 'tf' else (f0, f1, t0, t1)
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_signaltools.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (99.6 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spectral_py.py ADDED
@@ -0,0 +1,2291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tools for spectral analysis.
2
+ """
3
+ import numpy as np
4
+ import numpy.typing as npt
5
+ from scipy import fft as sp_fft
6
+ from . import _signaltools
7
+ from .windows import get_window
8
+ from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
9
+ import warnings
10
+ from typing import Literal
11
+
12
+
13
+ __all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
14
+ 'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA']
15
+
16
+
17
+ def lombscargle(
18
+ x: npt.ArrayLike,
19
+ y: npt.ArrayLike,
20
+ freqs: npt.ArrayLike,
21
+ precenter: bool = False,
22
+ normalize: bool | Literal["power", "normalize", "amplitude"] = False,
23
+ *,
24
+ weights: npt.NDArray | None = None,
25
+ floating_mean: bool = False,
26
+ ) -> npt.NDArray:
27
+ """
28
+ Compute the generalized Lomb-Scargle periodogram.
29
+
30
+ The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
31
+ extended by Scargle [2]_ to find, and test the significance of weak
32
+ periodic signals with uneven temporal sampling. The algorithm used
33
+ here is based on a weighted least-squares fit of the form
34
+ ``y(ω) = a*cos(ω*x) + b*sin(ω*x) + c``, where the fit is calculated for
35
+ each frequency independently. This algorithm was developed by Zechmeister
36
+ and Kürster which improves the Lomb-Scargle periodogram by enabling
37
+ the weighting of individual samples and calculating an unknown y offset
38
+ (also called a "floating-mean" model) [3]_. For more details, and practical
39
+ considerations, see the excellent reference on the Lomb-Scargle periodogram [4]_.
40
+
41
+ When *normalize* is False (or "power") (default) the computed periodogram
42
+ is unnormalized, it takes the value ``(A**2) * N/4`` for a harmonic
43
+ signal with amplitude A for sufficiently large N. Where N is the length of x or y.
44
+
45
+ When *normalize* is True (or "normalize") the computed periodogram is normalized
46
+ by the residuals of the data around a constant reference model (at zero).
47
+
48
+ When *normalize* is "amplitude" the computed periodogram is the complex
49
+ representation of the amplitude and phase.
50
+
51
+ Input arrays should be 1-D of a real floating data type, which are converted into
52
+ float64 arrays before processing.
53
+
54
+ Parameters
55
+ ----------
56
+ x : array_like
57
+ Sample times.
58
+ y : array_like
59
+ Measurement values. Values are assumed to have a baseline of ``y = 0``. If
60
+ there is a possibility of a y offset, it is recommended to set `floating_mean`
61
+ to True.
62
+ freqs : array_like
63
+ Angular frequencies (e.g., having unit rad/s=2π/s for `x` having unit s) for
64
+ output periodogram. Frequencies are normally >= 0, as any peak at ``-freq`` will
65
+ also exist at ``+freq``.
66
+ precenter : bool, optional
67
+ Pre-center measurement values by subtracting the mean, if True. This is
68
+ a legacy parameter and unnecessary if `floating_mean` is True.
69
+ normalize : bool | str, optional
70
+ Compute normalized or complex (amplitude + phase) periodogram.
71
+ Valid options are: ``False``/``"power"``, ``True``/``"normalize"``, or
72
+ ``"amplitude"``.
73
+ weights : array_like, optional
74
+ Weights for each sample. Weights must be nonnegative.
75
+ floating_mean : bool, optional
76
+ Determines a y offset for each frequency independently, if True.
77
+ Else the y offset is assumed to be `0`.
78
+
79
+ Returns
80
+ -------
81
+ pgram : array_like
82
+ Lomb-Scargle periodogram.
83
+
84
+ Raises
85
+ ------
86
+ ValueError
87
+ If any of the input arrays x, y, freqs, or weights are not 1D, or if any are
88
+ zero length. Or, if the input arrays x, y, and weights do not have the same
89
+ shape as each other.
90
+ ValueError
91
+ If any weight is < 0, or the sum of the weights is <= 0.
92
+ ValueError
93
+ If the normalize parameter is not one of the allowed options.
94
+
95
+ See Also
96
+ --------
97
+ periodogram: Power spectral density using a periodogram
98
+ welch: Power spectral density by Welch's method
99
+ csd: Cross spectral density by Welch's method
100
+
101
+ Notes
102
+ -----
103
+ The algorithm used will not automatically account for any unknown y offset, unless
104
+ floating_mean is True. Therefore, for most use cases, if there is a possibility of
105
+ a y offset, it is recommended to set floating_mean to True. If precenter is True,
106
+ it performs the operation ``y -= y.mean()``. However, precenter is a legacy
107
+ parameter, and unnecessary when floating_mean is True. Furthermore, the mean
108
+ removed by precenter does not account for sample weights, nor will it correct for
109
+ any bias due to consistently missing observations at peaks and/or troughs. When the
110
+ normalize parameter is "amplitude", for any frequency in freqs that is below
111
+ ``(2*pi)/(x.max() - x.min())``, the predicted amplitude will tend towards infinity.
112
+ The concept of a "Nyquist frequency" limit (see Nyquist-Shannon sampling theorem)
113
+ is not generally applicable to unevenly sampled data. Therefore, with unevenly
114
+ sampled data, valid frequencies in freqs can often be much higher than expected.
115
+
116
+ References
117
+ ----------
118
+ .. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
119
+ data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
120
+ :doi:`10.1007/bf00648343`
121
+
122
+ .. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
123
+ Statistical aspects of spectral analysis of unevenly spaced data",
124
+ The Astrophysical Journal, vol 263, pp. 835-853, 1982
125
+ :doi:`10.1086/160554`
126
+
127
+ .. [3] M. Zechmeister and M. Kürster, "The generalised Lomb-Scargle periodogram.
128
+ A new formalism for the floating-mean and Keplerian periodograms,"
129
+ Astronomy and Astrophysics, vol. 496, pp. 577-584, 2009
130
+ :doi:`10.1051/0004-6361:200811296`
131
+
132
+ .. [4] J.T. VanderPlas, "Understanding the Lomb-Scargle Periodogram,"
133
+ The Astrophysical Journal Supplement Series, vol. 236, no. 1, p. 16,
134
+ May 2018
135
+ :doi:`10.3847/1538-4365/aab766`
136
+
137
+
138
+ Examples
139
+ --------
140
+ >>> import numpy as np
141
+ >>> rng = np.random.default_rng()
142
+
143
+ First define some input parameters for the signal:
144
+
145
+ >>> A = 2. # amplitude
146
+ >>> c = 2. # offset
147
+ >>> w0 = 1. # rad/sec
148
+ >>> nin = 150
149
+ >>> nout = 1002
150
+
151
+ Randomly generate sample times:
152
+
153
+ >>> x = rng.uniform(0, 10*np.pi, nin)
154
+
155
+ Plot a sine wave for the selected times:
156
+
157
+ >>> y = A * np.cos(w0*x) + c
158
+
159
+ Define the array of frequencies for which to compute the periodogram:
160
+
161
+ >>> w = np.linspace(0.25, 10, nout)
162
+
163
+ Calculate Lomb-Scargle periodogram for each of the normalize options:
164
+
165
+ >>> from scipy.signal import lombscargle
166
+ >>> pgram_power = lombscargle(x, y, w, normalize=False)
167
+ >>> pgram_norm = lombscargle(x, y, w, normalize=True)
168
+ >>> pgram_amp = lombscargle(x, y, w, normalize='amplitude')
169
+ ...
170
+ >>> pgram_power_f = lombscargle(x, y, w, normalize=False, floating_mean=True)
171
+ >>> pgram_norm_f = lombscargle(x, y, w, normalize=True, floating_mean=True)
172
+ >>> pgram_amp_f = lombscargle(x, y, w, normalize='amplitude', floating_mean=True)
173
+
174
+ Now make a plot of the input data:
175
+
176
+ >>> import matplotlib.pyplot as plt
177
+ >>> fig, (ax_t, ax_p, ax_n, ax_a) = plt.subplots(4, 1, figsize=(5, 6))
178
+ >>> ax_t.plot(x, y, 'b+')
179
+ >>> ax_t.set_xlabel('Time [s]')
180
+ >>> ax_t.set_ylabel('Amplitude')
181
+
182
+ Then plot the periodogram for each of the normalize options, as well as with and
183
+ without floating_mean=True:
184
+
185
+ >>> ax_p.plot(w, pgram_power, label='default')
186
+ >>> ax_p.plot(w, pgram_power_f, label='floating_mean=True')
187
+ >>> ax_p.set_xlabel('Angular frequency [rad/s]')
188
+ >>> ax_p.set_ylabel('Power')
189
+ >>> ax_p.legend(prop={'size': 7})
190
+ ...
191
+ >>> ax_n.plot(w, pgram_norm, label='default')
192
+ >>> ax_n.plot(w, pgram_norm_f, label='floating_mean=True')
193
+ >>> ax_n.set_xlabel('Angular frequency [rad/s]')
194
+ >>> ax_n.set_ylabel('Normalized')
195
+ >>> ax_n.legend(prop={'size': 7})
196
+ ...
197
+ >>> ax_a.plot(w, np.abs(pgram_amp), label='default')
198
+ >>> ax_a.plot(w, np.abs(pgram_amp_f), label='floating_mean=True')
199
+ >>> ax_a.set_xlabel('Angular frequency [rad/s]')
200
+ >>> ax_a.set_ylabel('Amplitude')
201
+ >>> ax_a.legend(prop={'size': 7})
202
+ ...
203
+ >>> plt.tight_layout()
204
+ >>> plt.show()
205
+
206
+ """
207
+
208
+ # if no weights are provided, assume all data points are equally important
209
+ if weights is None:
210
+ weights = np.ones_like(y, dtype=np.float64)
211
+ else:
212
+ # if provided, make sure weights is an array and cast to float64
213
+ weights = np.asarray(weights, dtype=np.float64)
214
+
215
+ # make sure other inputs are arrays and cast to float64
216
+ # done before validation, in case they were not arrays
217
+ x = np.asarray(x, dtype=np.float64)
218
+ y = np.asarray(y, dtype=np.float64)
219
+ freqs = np.asarray(freqs, dtype=np.float64)
220
+
221
+ # validate input shapes
222
+ if not (x.ndim == 1 and x.size > 0 and x.shape == y.shape == weights.shape):
223
+ raise ValueError("Parameters x, y, weights must be 1-D arrays of "
224
+ "equal non-zero length!")
225
+ if not (freqs.ndim == 1 and freqs.size > 0):
226
+ raise ValueError("Parameter freqs must be a 1-D array of non-zero length!")
227
+
228
+ # validate weights
229
+ if not (np.all(weights >= 0) and np.sum(weights) > 0):
230
+ raise ValueError("Parameter weights must have only non-negative entries "
231
+ "which sum to a positive value!")
232
+
233
+ # validate normalize parameter
234
+ if isinstance(normalize, bool):
235
+ # if bool, convert to str literal
236
+ normalize = "normalize" if normalize else "power"
237
+
238
+ if normalize not in ["power", "normalize", "amplitude"]:
239
+ raise ValueError(
240
+ "Normalize must be: False (or 'power'), True (or 'normalize'), "
241
+ "or 'amplitude'."
242
+ )
243
+
244
+ # weight vector must sum to 1
245
+ weights *= 1.0 / weights.sum()
246
+
247
+ # if requested, perform precenter
248
+ if precenter:
249
+ y -= y.mean()
250
+
251
+ # transform arrays
252
+ # row vector
253
+ freqs = freqs.reshape(1, -1)
254
+ # column vectors
255
+ x = x.reshape(-1, 1)
256
+ y = y.reshape(-1, 1)
257
+ weights = weights.reshape(-1, 1)
258
+
259
+ # store frequent intermediates
260
+ weights_y = weights * y
261
+ freqst = freqs * x
262
+ coswt = np.cos(freqst)
263
+ sinwt = np.sin(freqst)
264
+
265
+ Y = np.dot(weights.T, y) # Eq. 7
266
+ CC = np.dot(weights.T, coswt * coswt) # Eq. 13
267
+ SS = 1.0 - CC # trig identity: S^2 = 1 - C^2 Eq.14
268
+ CS = np.dot(weights.T, coswt * sinwt) # Eq. 15
269
+
270
+ if floating_mean:
271
+ C = np.dot(weights.T, coswt) # Eq. 8
272
+ S = np.dot(weights.T, sinwt) # Eq. 9
273
+ CC -= C * C # Eq. 13
274
+ SS -= S * S # Eq. 14
275
+ CS -= C * S # Eq. 15
276
+
277
+ # calculate tau (phase offset to eliminate CS variable)
278
+ tau = 0.5 * np.arctan2(2.0 * CS, CC - SS) # Eq. 19
279
+ freqst_tau = freqst - tau
280
+
281
+ # coswt and sinwt are now offset by tau, which eliminates CS
282
+ coswt_tau = np.cos(freqst_tau)
283
+ sinwt_tau = np.sin(freqst_tau)
284
+
285
+ YC = np.dot(weights_y.T, coswt_tau) # Eq. 11
286
+ YS = np.dot(weights_y.T, sinwt_tau) # Eq. 12
287
+ CC = np.dot(weights.T, coswt_tau * coswt_tau) # Eq. 13, CC range is [0.5, 1.0]
288
+ SS = 1.0 - CC # trig identity: S^2 = 1 - C^2 Eq. 14, SS range is [0.0, 0.5]
289
+
290
+ if floating_mean:
291
+ C = np.dot(weights.T, coswt_tau) # Eq. 8
292
+ S = np.dot(weights.T, sinwt_tau) # Eq. 9
293
+ YC -= Y * C # Eq. 11
294
+ YS -= Y * S # Eq. 12
295
+ CC -= C * C # Eq. 13, CC range is now [0.0, 1.0]
296
+ SS -= S * S # Eq. 14, SS range is now [0.0, 0.5]
297
+
298
+ # to prevent division by zero errors with a and b, as well as correcting for
299
+ # numerical precision errors that lead to CC or SS being approximately -0.0,
300
+ # make sure CC and SS are both > 0
301
+ epsneg = np.finfo(dtype=y.dtype).epsneg
302
+ CC[CC < epsneg] = epsneg
303
+ SS[SS < epsneg] = epsneg
304
+
305
+ # calculate a and b
306
+ # where: y(w) = a*cos(w) + b*sin(w) + c
307
+ a = YC / CC # Eq. A.4 and 6, eliminating CS
308
+ b = YS / SS # Eq. A.4 and 6, eliminating CS
309
+ # c = Y - a * C - b * S
310
+
311
+ # store final value as power in A^2 (i.e., (y units)^2)
312
+ pgram = 2.0 * (a * YC + b * YS)
313
+
314
+ # squeeze back to a vector
315
+ pgram = np.squeeze(pgram)
316
+
317
+ if normalize == "power": # (default)
318
+ # return the legacy power units ((A**2) * N/4)
319
+
320
+ pgram *= float(x.shape[0]) / 4.0
321
+
322
+ elif normalize == "normalize":
323
+ # return the normalized power (power at current frequency wrt the entire signal)
324
+ # range will be [0, 1]
325
+
326
+ YY = np.dot(weights_y.T, y) # Eq. 10
327
+ if floating_mean:
328
+ YY -= Y * Y # Eq. 10
329
+
330
+ pgram *= 0.5 / np.squeeze(YY) # Eq. 20
331
+
332
+ else: # normalize == "amplitude":
333
+ # return the complex representation of the best-fit amplitude and phase
334
+
335
+ # squeeze back to vectors
336
+ a = np.squeeze(a)
337
+ b = np.squeeze(b)
338
+ tau = np.squeeze(tau)
339
+
340
+ # calculate the complex representation, and correct for tau rotation
341
+ pgram = (a + 1j * b) * np.exp(1j * tau)
342
+
343
+ return pgram
344
+
345
+
346
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
                return_onesided=True, scaling='density', axis=-1):
    """Estimate power spectral density using a periodogram.

    This is implemented as a single-segment call to `welch` with no
    overlap; the docstring below summarizes the shared parameters.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. If `window` is array_like it will be used
        directly as the window and its length must be equal to the length
        of the axis over which the periodogram is computed. Defaults
        to 'boxcar'.
    nfft : int, optional
        Length of the FFT used. If `None` the length of `x` will be used.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If a string, it is passed
        as the `type` argument to the `detrend` function. If a function,
        it takes a segment and returns a detrended segment. If `False`,
        no detrending is done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If `False`
        return a two-sided spectrum. Defaults to `True`, but for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the power spectral density ('density')
        where `Pxx` has units of V**2/Hz and computing the squared
        magnitude spectrum ('spectrum') where `Pxx` has units of V**2, if
        `x` is measured in V and `fs` is measured in Hz. Defaults to
        'density'.
    axis : int, optional
        Axis along which the periodogram is computed; the default is over
        the last axis (i.e. ``axis=-1``).

    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Pxx : ndarray
        Power spectral density or power spectrum of `x`.

    See Also
    --------
    welch: Estimate power spectral density using Welch's method
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data

    Notes
    -----
    Consult the :ref:`tutorial_SpectralAnalysis` section of the
    :ref:`user_guide` for a discussion of the scalings of the power
    spectral density and the magnitude (squared) spectrum.

    .. versionadded:: 0.12.0
    """
    x = np.asarray(x)

    if x.size == 0:
        return np.empty(x.shape), np.empty(x.shape)

    if window is None:
        window = 'boxcar'

    n_axis = x.shape[axis]
    if nfft is None:
        nperseg = n_axis
    elif nfft >= n_axis:
        # Zero-padding (nfft > data length) is handled inside `welch`;
        # the single segment is the whole signal either way.
        nperseg = n_axis if nfft > n_axis else nfft
    else:
        # nfft < data length: truncate the signal along `axis`.
        sl = [slice(None)] * x.ndim
        sl[axis] = slice(None, nfft)
        x = x[tuple(sl)]
        nperseg = nfft
        nfft = None

    if hasattr(window, 'size') and window.size != nperseg:
        raise ValueError('the size of the window must be the same size '
                         'of the input on the specified axis')

    return welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=0,
                 nfft=nfft, detrend=detrend, return_onesided=return_onesided,
                 scaling=scaling, axis=axis)
488
+
489
+
490
+ def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
491
+ detrend='constant', return_onesided=True, scaling='density',
492
+ axis=-1, average='mean'):
493
+ r"""
494
+ Estimate power spectral density using Welch's method.
495
+
496
+ Welch's method [1]_ computes an estimate of the power spectral
497
+ density by dividing the data into overlapping segments, computing a
498
+ modified periodogram for each segment and averaging the
499
+ periodograms.
500
+
501
+ Parameters
502
+ ----------
503
+ x : array_like
504
+ Time series of measurement values
505
+ fs : float, optional
506
+ Sampling frequency of the `x` time series. Defaults to 1.0.
507
+ window : str or tuple or array_like, optional
508
+ Desired window to use. If `window` is a string or tuple, it is
509
+ passed to `get_window` to generate the window values, which are
510
+ DFT-even by default. See `get_window` for a list of windows and
511
+ required parameters. If `window` is array_like it will be used
512
+ directly as the window and its length must be nperseg. Defaults
513
+ to a Hann window.
514
+ nperseg : int, optional
515
+ Length of each segment. Defaults to None, but if window is str or
516
+ tuple, is set to 256, and if window is array_like, is set to the
517
+ length of the window.
518
+ noverlap : int, optional
519
+ Number of points to overlap between segments. If `None`,
520
+ ``noverlap = nperseg // 2``. Defaults to `None`.
521
+ nfft : int, optional
522
+ Length of the FFT used, if a zero padded FFT is desired. If
523
+ `None`, the FFT length is `nperseg`. Defaults to `None`.
524
+ detrend : str or function or `False`, optional
525
+ Specifies how to detrend each segment. If `detrend` is a
526
+ string, it is passed as the `type` argument to the `detrend`
527
+ function. If it is a function, it takes a segment and returns a
528
+ detrended segment. If `detrend` is `False`, no detrending is
529
+ done. Defaults to 'constant'.
530
+ return_onesided : bool, optional
531
+ If `True`, return a one-sided spectrum for real data. If
532
+ `False` return a two-sided spectrum. Defaults to `True`, but for
533
+ complex data, a two-sided spectrum is always returned.
534
+ scaling : { 'density', 'spectrum' }, optional
535
+ Selects between computing the power spectral density ('density')
536
+ where `Pxx` has units of V**2/Hz and computing the squared magnitude
537
+ spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
538
+ is measured in V and `fs` is measured in Hz. Defaults to
539
+ 'density'
540
+ axis : int, optional
541
+ Axis along which the periodogram is computed; the default is
542
+ over the last axis (i.e. ``axis=-1``).
543
+ average : { 'mean', 'median' }, optional
544
+ Method to use when averaging periodograms. Defaults to 'mean'.
545
+
546
+ .. versionadded:: 1.2.0
547
+
548
+ Returns
549
+ -------
550
+ f : ndarray
551
+ Array of sample frequencies.
552
+ Pxx : ndarray
553
+ Power spectral density or power spectrum of x.
554
+
555
+ See Also
556
+ --------
557
+ periodogram: Simple, optionally modified periodogram
558
+ lombscargle: Lomb-Scargle periodogram for unevenly sampled data
559
+
560
+ Notes
561
+ -----
562
+ An appropriate amount of overlap will depend on the choice of window
563
+ and on your requirements. For the default Hann window an overlap of
564
+ 50% is a reasonable trade off between accurately estimating the
565
+ signal power, while not over counting any of the data. Narrower
566
+ windows may require a larger overlap.
567
+
568
+ If `noverlap` is 0, this method is equivalent to Bartlett's method
569
+ [2]_.
570
+
571
+ Consult the :ref:`tutorial_SpectralAnalysis` section of the :ref:`user_guide`
572
+ for a discussion of the scalings of the power spectral density and
573
+ the (squared) magnitude spectrum.
574
+
575
+ .. versionadded:: 0.12.0
576
+
577
+ References
578
+ ----------
579
+ .. [1] P. Welch, "The use of the fast Fourier transform for the
580
+ estimation of power spectra: A method based on time averaging
581
+ over short, modified periodograms", IEEE Trans. Audio
582
+ Electroacoust. vol. 15, pp. 70-73, 1967.
583
+ .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
584
+ Biometrika, vol. 37, pp. 1-16, 1950.
585
+
586
+ Examples
587
+ --------
588
+ >>> import numpy as np
589
+ >>> from scipy import signal
590
+ >>> import matplotlib.pyplot as plt
591
+ >>> rng = np.random.default_rng()
592
+
593
+ Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
594
+ 0.001 V**2/Hz of white noise sampled at 10 kHz.
595
+
596
+ >>> fs = 10e3
597
+ >>> N = 1e5
598
+ >>> amp = 2*np.sqrt(2)
599
+ >>> freq = 1234.0
600
+ >>> noise_power = 0.001 * fs / 2
601
+ >>> time = np.arange(N) / fs
602
+ >>> x = amp*np.sin(2*np.pi*freq*time)
603
+ >>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape)
604
+
605
+ Compute and plot the power spectral density.
606
+
607
+ >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
608
+ >>> plt.semilogy(f, Pxx_den)
609
+ >>> plt.ylim([0.5e-3, 1])
610
+ >>> plt.xlabel('frequency [Hz]')
611
+ >>> plt.ylabel('PSD [V**2/Hz]')
612
+ >>> plt.show()
613
+
614
+ If we average the last half of the spectral density, to exclude the
615
+ peak, we can recover the noise power on the signal.
616
+
617
+ >>> np.mean(Pxx_den[256:])
618
+ 0.0009924865443739191
619
+
620
+ Now compute and plot the power spectrum.
621
+
622
+ >>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
623
+ >>> plt.figure()
624
+ >>> plt.semilogy(f, np.sqrt(Pxx_spec))
625
+ >>> plt.xlabel('frequency [Hz]')
626
+ >>> plt.ylabel('Linear spectrum [V RMS]')
627
+ >>> plt.show()
628
+
629
+ The peak height in the power spectrum is an estimate of the RMS
630
+ amplitude.
631
+
632
+ >>> np.sqrt(Pxx_spec.max())
633
+ 2.0077340678640727
634
+
635
+ If we now introduce a discontinuity in the signal, by increasing the
636
+ amplitude of a small portion of the signal by 50, we can see the
637
+ corruption of the mean average power spectral density, but using a
638
+ median average better estimates the normal behaviour.
639
+
640
+ >>> x[int(N//2):int(N//2)+10] *= 50.
641
+ >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
642
+ >>> f_med, Pxx_den_med = signal.welch(x, fs, nperseg=1024, average='median')
643
+ >>> plt.semilogy(f, Pxx_den, label='mean')
644
+ >>> plt.semilogy(f_med, Pxx_den_med, label='median')
645
+ >>> plt.ylim([0.5e-3, 1])
646
+ >>> plt.xlabel('frequency [Hz]')
647
+ >>> plt.ylabel('PSD [V**2/Hz]')
648
+ >>> plt.legend()
649
+ >>> plt.show()
650
+
651
+ """
652
+ freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg,
653
+ noverlap=noverlap, nfft=nfft, detrend=detrend,
654
+ return_onesided=return_onesided, scaling=scaling,
655
+ axis=axis, average=average)
656
+
657
+ return freqs, Pxx.real
658
+
659
+
660
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
        detrend='constant', return_onesided=True, scaling='density',
        axis=-1, average='mean'):
    r"""
    Estimate the cross power spectral density, Pxy, using Welch's method.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    y : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If a string, it is passed
        as the `type` argument to the `detrend` function. If a function,
        it takes a segment and returns a detrended segment. If `False`,
        no detrending is done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If `False`
        return a two-sided spectrum. Defaults to `True`, but for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the cross spectral density ('density')
        where `Pxy` has units of V**2/Hz and computing the cross spectrum
        ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
        measured in V and `fs` is measured in Hz. Defaults to 'density'
    axis : int, optional
        Axis along which the CSD is computed for both inputs; the
        default is over the last axis (i.e. ``axis=-1``).
    average : { 'mean', 'median' }, optional
        Method to use when averaging periodograms. If the spectrum is
        complex, the average is computed separately for the real and
        imaginary parts. Defaults to 'mean'.

        .. versionadded:: 1.2.0

    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Pxy : ndarray
        Cross spectral density or cross power spectrum of x,y.

    See Also
    --------
    periodogram: Simple, optionally modified periodogram
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    welch: Power spectral density by Welch's method. [Equivalent to
           csd(x,x)]
    coherence: Magnitude squared coherence by Welch's method.

    Notes
    -----
    By convention, Pxy is computed with the conjugate FFT of X
    multiplied by the FFT of Y. If the input series differ in length,
    the shorter series will be zero-padded to match.

    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default Hann window an overlap of
    50% is a reasonable trade off between accurately estimating the
    signal power, while not over counting any of the data. Narrower
    windows may require a larger overlap.

    Consult the :ref:`tutorial_SpectralAnalysis` section of the :ref:`user_guide`
    for a discussion of the scalings of a spectral density and an (amplitude) spectrum.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] P. Welch, "The use of the fast Fourier transform for the
           estimation of power spectra: A method based on time averaging
           over short, modified periodograms", IEEE Trans. Audio
           Electroacoust. vol. 15, pp. 70-73, 1967.
    .. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
           Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()

    Generate two test signals with some common features.

    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 20
    >>> freq = 1234.0
    >>> noise_power = 0.001 * fs / 2
    >>> time = np.arange(N) / fs
    >>> b, a = signal.butter(2, 0.25, 'low')
    >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape)
    >>> y = signal.lfilter(b, a, x)
    >>> x += amp*np.sin(2*np.pi*freq*time)
    >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)

    Compute and plot the magnitude of the cross spectral density.

    >>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
    >>> plt.semilogy(f, np.abs(Pxy))
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('CSD [V**2/Hz]')
    >>> plt.show()

    """
    freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap,
                                     nfft, detrend, return_onesided, scaling,
                                     axis, mode='psd')

    # Average the modified periodograms over the window axis (last axis).
    # A single window needs no averaging; its axis is simply dropped.
    if Pxy.ndim >= 2 and Pxy.size > 0:
        if Pxy.shape[-1] == 1:
            Pxy = np.reshape(Pxy, Pxy.shape[:-1])
        elif average == 'mean':
            Pxy = Pxy.mean(axis=-1)
        elif average == 'median':
            # np.median must be passed real arrays for the desired result,
            # so complex spectra are averaged component-wise.
            bias = _median_bias(Pxy.shape[-1])
            if np.iscomplexobj(Pxy):
                Pxy = (np.median(np.real(Pxy), axis=-1)
                       + 1j * np.median(np.imag(Pxy), axis=-1))
            else:
                Pxy = np.median(Pxy, axis=-1)
            Pxy /= bias
        else:
            raise ValueError(f'average must be "median" or "mean", got {average}')

    return freqs, Pxy
814
+
815
+
816
def spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=None, noverlap=None,
                nfft=None, detrend='constant', return_onesided=True,
                scaling='density', axis=-1, mode='psd'):
    """Compute a spectrogram with consecutive Fourier transforms (legacy function).

    Spectrograms can be used as a way of visualizing the change of a
    nonstationary signal's frequency content over time.

    .. legacy:: function

        :class:`ShortTimeFFT` is a newer STFT / ISTFT implementation with more
        features also including a :meth:`~ShortTimeFFT.spectrogram` method.
        A :ref:`comparison <tutorial_stft_legacy_stft>` between the
        implementations can be found in the :ref:`tutorial_stft` section of
        the :ref:`user_guide`.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. If `window` is array_like it will be used
        directly as the window and its length must be nperseg.
        Defaults to a Tukey window with shape parameter of 0.25.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 8``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If a string, it is passed
        as the `type` argument to the `detrend` function. If a function,
        it takes a segment and returns a detrended segment. If `False`,
        no detrending is done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If `False`
        return a two-sided spectrum. Defaults to `True`, but for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the power spectral density ('density')
        where `Sxx` has units of V**2/Hz and computing the power
        spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
        is measured in V and `fs` is measured in Hz. Defaults to
        'density'.
    axis : int, optional
        Axis along which the spectrogram is computed; the default is over
        the last axis (i.e. ``axis=-1``).
    mode : str, optional
        Defines what kind of return values are expected. Options are
        ['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
        equivalent to the output of `stft` with no padding or boundary
        extension. 'magnitude' returns the absolute magnitude of the
        STFT. 'angle' and 'phase' return the complex angle of the STFT,
        with and without unwrapping, respectively.

    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    t : ndarray
        Array of segment times.
    Sxx : ndarray
        Spectrogram of x. By default, the last axis of Sxx corresponds
        to the segment times.

    See Also
    --------
    periodogram: Simple, optionally modified periodogram
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    welch: Power spectral density by Welch's method.
    csd: Cross spectral density by Welch's method.
    ShortTimeFFT: Newer STFT/ISTFT implementation providing more features,
                  which also includes a :meth:`~ShortTimeFFT.spectrogram`
                  method.

    Notes
    -----
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. In contrast to welch's method, where the
    entire data stream is averaged over, one may wish to use a smaller
    overlap (or perhaps none at all) when computing a spectrogram, to
    maintain some statistical independence between individual segments.
    It is for this reason that the default window is a Tukey window with
    1/8th of a window's length overlap at each end.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
           "Discrete-Time Signal Processing", Prentice Hall, 1999.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> from scipy.fft import fftshift
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()

    Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
    modulated around 3kHz, corrupted by white noise of exponentially
    decreasing magnitude sampled at 10 kHz.

    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 2 * np.sqrt(2)
    >>> noise_power = 0.01 * fs / 2
    >>> time = np.arange(N) / float(fs)
    >>> mod = 500*np.cos(2*np.pi*0.25*time)
    >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
    >>> noise = rng.normal(scale=np.sqrt(noise_power), size=time.shape)
    >>> noise *= np.exp(-time/5)
    >>> x = carrier + noise

    Compute and plot the spectrogram.

    >>> f, t, Sxx = signal.spectrogram(x, fs)
    >>> plt.pcolormesh(t, f, Sxx, shading='gouraud')
    >>> plt.ylabel('Frequency [Hz]')
    >>> plt.xlabel('Time [sec]')
    >>> plt.show()

    Note, if using output that is not one sided, then use the following:

    >>> f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False)
    >>> plt.pcolormesh(t, fftshift(f), fftshift(Sxx, axes=0), shading='gouraud')
    >>> plt.ylabel('Frequency [Hz]')
    >>> plt.xlabel('Time [sec]')
    >>> plt.show()

    """
    modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
    if mode not in modelist:
        raise ValueError(f'unknown value for mode {mode}, must be one of {modelist}')

    # nperseg has to be resolved first so the noverlap default can be derived.
    window, nperseg = _triage_segments(window, nperseg,
                                       input_length=x.shape[axis])

    # Use less overlap than welch, so samples are more statistically
    # independent between segments.
    if noverlap is None:
        noverlap = nperseg // 8

    # 'psd' is handled natively by the helper; every other mode starts from
    # the complex STFT and is post-processed below.
    helper_mode = 'psd' if mode == 'psd' else 'stft'
    freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
                                        nfft, detrend, return_onesided,
                                        scaling, axis, mode=helper_mode)

    if mode == 'magnitude':
        Sxx = np.abs(Sxx)
    elif mode in ['angle', 'phase']:
        Sxx = np.angle(Sxx)
        if mode == 'phase':
            # Sxx has one additional dimension for time strides
            if axis < 0:
                axis -= 1
            Sxx = np.unwrap(Sxx, axis=axis)
    # mode == 'complex' is the raw `stft` output; no modification needed.

    return freqs, time, Sxx
997
+
998
+
999
def check_COLA(window, nperseg, noverlap, tol=1e-10):
    r"""Check whether the Constant OverLap Add (COLA) constraint is met.

    Parameters
    ----------
    window : str or tuple or array_like
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg.
    nperseg : int
        Length of each segment.
    noverlap : int
        Number of points to overlap between segments.
    tol : float, optional
        The allowed variance of a bin's weighted sum from the median bin
        sum.

    Returns
    -------
    verdict : bool
        `True` if chosen combination satisfies COLA within `tol`,
        `False` otherwise

    Raises
    ------
    ValueError
        If `nperseg` is not positive, if `noverlap` is negative or not
        less than `nperseg`, or if an array_like `window` is not 1-D of
        length `nperseg`.

    See Also
    --------
    check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
    stft: Short Time Fourier Transform
    istft: Inverse Short Time Fourier Transform

    Notes
    -----
    In order to enable inversion of an STFT via the inverse STFT in
    `istft`, it is sufficient that the signal windowing obeys the constraint of
    "Constant OverLap Add" (COLA). This ensures that every point in the input
    data is equally weighted, thereby avoiding aliasing and allowing full
    reconstruction.

    Some examples of windows that satisfy COLA:
        - Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
        - Bartlett window at overlap of 1/2, 3/4, 5/6, ...
        - Hann window at 1/2, 2/3, 3/4, ...
        - Any Blackman family window at 2/3 overlap
        - Any window with ``noverlap = nperseg-1``

    A very comprehensive list of other windows may be found in [2]_,
    wherein the COLA condition is satisfied when the "Amplitude
    Flatness" is unity.

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
           Publishing, 2011,ISBN 978-0-9745607-3-1.
    .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
           spectral density estimation by the Discrete Fourier transform
           (DFT), including a comprehensive list of window functions and
           some new at-top windows", 2002,
           http://hdl.handle.net/11858/00-001M-0000-0013-557A-5

    Examples
    --------
    >>> from scipy import signal

    Confirm COLA condition for rectangular window of 75% (3/4) overlap:

    >>> signal.check_COLA(signal.windows.boxcar(100), 100, 75)
    True

    COLA is not true for 25% (1/4) overlap, though:

    >>> signal.check_COLA(signal.windows.boxcar(100), 100, 25)
    False

    "Symmetrical" Hann window (for filter design) is not COLA:

    >>> signal.check_COLA(signal.windows.hann(120, sym=True), 120, 60)
    False

    "Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
    overlap of 1/2, 2/3, 3/4, etc.:

    >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 60)
    True

    >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 80)
    True

    >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 90)
    True

    """
    nperseg = int(nperseg)

    if nperseg < 1:
        raise ValueError('nperseg must be a positive integer')

    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    # Consistent with `check_NOLA`: without this check a negative overlap
    # fell through to the bin-sum logic below and produced a confusing
    # TypeError (sum over zero chunks yields the int 0, which then gets
    # sliced).
    if noverlap < 0:
        raise ValueError('noverlap must be a nonnegative integer')
    noverlap = int(noverlap)

    if isinstance(window, str) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] != nperseg:
            raise ValueError('window must have length of nperseg')

    # Sum the window over all overlapped segment positions; COLA holds when
    # every sample position receives (nearly) the same total weight.
    step = nperseg - noverlap
    binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step))

    # Fold in the leftover tail when the step does not divide nperseg evenly.
    if nperseg % step != 0:
        binsums[:nperseg % step] += win[-(nperseg % step):]

    deviation = binsums - np.median(binsums)
    return np.max(np.abs(deviation)) < tol
1119
+
1120
+
1121
def check_NOLA(window, nperseg, noverlap, tol=1e-10):
    r"""Check whether the Nonzero Overlap Add (NOLA) constraint is met.

    Parameters
    ----------
    window : str or tuple or array_like
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg.
    nperseg : int
        Length of each segment.
    noverlap : int
        Number of points to overlap between segments.
    tol : float, optional
        The allowed variance of a bin's weighted sum from the median bin
        sum.

    Returns
    -------
    verdict : bool
        `True` if chosen combination satisfies the NOLA constraint within
        `tol`, `False` otherwise

    See Also
    --------
    check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met
    stft: Short Time Fourier Transform
    istft: Inverse Short Time Fourier Transform

    Notes
    -----
    In order to enable inversion of an STFT via the inverse STFT in
    `istft`, the signal windowing must obey the constraint of "nonzero
    overlap add" (NOLA):

    .. math:: \sum_{t}w^{2}[n-tH] \ne 0

    for all :math:`n`, where :math:`w` is the window function, :math:`t` is the
    frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` -
    `noverlap`).

    This ensures that the normalization factors in the denominator of the
    overlap-add inversion equation are not zero. Only very pathological windows
    will fail the NOLA constraint.

    .. versionadded:: 1.2.0

    References
    ----------
    .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
           Publishing, 2011,ISBN 978-0-9745607-3-1.
    .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
           spectral density estimation by the Discrete Fourier transform
           (DFT), including a comprehensive list of window functions and
           some new at-top windows", 2002,
           http://hdl.handle.net/11858/00-001M-0000-0013-557A-5

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal

    Confirm NOLA condition for rectangular window of 75% (3/4) overlap:

    >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 75)
    True

    NOLA is also true for 25% (1/4) overlap:

    >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 25)
    True

    "Symmetrical" Hann window (for filter design) is also NOLA:

    >>> signal.check_NOLA(signal.windows.hann(120, sym=True), 120, 60)
    True

    As long as there is overlap, it takes quite a pathological window to fail
    NOLA:

    >>> w = np.ones(64, dtype="float")
    >>> w[::2] = 0
    >>> signal.check_NOLA(w, 64, 32)
    False

    If there is not enough overlap, a window with zeros at the ends will not
    work:

    >>> signal.check_NOLA(signal.windows.hann(64), 64, 0)
    False
    >>> signal.check_NOLA(signal.windows.hann(64), 64, 1)
    False
    >>> signal.check_NOLA(signal.windows.hann(64), 64, 2)
    True

    """
    nperseg = int(nperseg)

    if nperseg < 1:
        raise ValueError('nperseg must be a positive integer')

    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg')
    if noverlap < 0:
        raise ValueError('noverlap must be a nonnegative integer')
    noverlap = int(noverlap)

    if isinstance(window, str) or type(window) is tuple:
        w = get_window(window, nperseg)
    else:
        w = np.asarray(window)
        if w.ndim != 1:
            raise ValueError('window must be 1-D')
        if w.shape[0] != nperseg:
            raise ValueError('window must have length of nperseg')

    hop = nperseg - noverlap
    # Sum the squared window over every full hop-sized chunk; together the
    # chunks cover one period of the overlap-add normalization term.
    chunks = [w[k * hop:(k + 1) * hop] ** 2 for k in range(nperseg // hop)]
    bins = np.sum(chunks, axis=0)

    # Fold in the leftover tail when the hop does not divide nperseg evenly.
    leftover = nperseg % hop
    if leftover:
        bins[:leftover] += w[-leftover:] ** 2

    return np.min(bins) > tol
1246
+
1247
+
1248
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
         detrend=False, return_onesided=True, boundary='zeros', padded=True,
         axis=-1, scaling='spectrum'):
    r"""Compute the Short Time Fourier Transform (legacy function).

    STFTs can be used as a way of quantifying the change of a
    nonstationary signal's frequency and phase content over time.

    .. legacy:: function

        `ShortTimeFFT` is a newer STFT / ISTFT implementation with more
        features. A :ref:`comparison <tutorial_stft_legacy_stft>` between the
        implementations can be found in the :ref:`tutorial_stft` section of the
        :ref:`user_guide`.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to 256.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`. When
        specified, the COLA constraint must be met (see Notes below).
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If a string, it is passed
        as the `type` argument to the `detrend` function. If a function,
        it takes a segment and returns a detrended segment. If `False`,
        no detrending is done. Defaults to `False`.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If `False`
        return a two-sided spectrum. Defaults to `True`, but for complex
        data, a two-sided spectrum is always returned.
    boundary : str or None, optional
        Specifies whether the input signal is extended at both ends, and
        how to generate the new values, in order to center the first
        windowed segment on the first input point. Valid options are
        ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
        'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
        extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
    padded : bool, optional
        Specifies whether the input signal is zero-padded at the end to
        make the signal fit exactly into an integer number of window
        segments, so that all of the signal is included in the output.
        Defaults to `True`. Padding occurs after boundary extension, if
        `boundary` is not `None`, and `padded` is `True`, as is the
        default.
    axis : int, optional
        Axis along which the STFT is computed; the default is over the
        last axis (i.e. ``axis=-1``).
    scaling: {'spectrum', 'psd'}
        The default 'spectrum' scaling allows each frequency line of `Zxx` to
        be interpreted as a magnitude spectrum. The 'psd' option scales each
        line to a power spectral density - it allows to calculate the signal's
        energy by numerically integrating over ``abs(Zxx)**2``.

        .. versionadded:: 1.9.0

    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    t : ndarray
        Array of segment times.
    Zxx : ndarray
        STFT of `x`. By default, the last axis of `Zxx` corresponds
        to the segment times.

    See Also
    --------
    istft: Inverse Short Time Fourier Transform
    ShortTimeFFT: Newer STFT/ISTFT implementation providing more features.
    check_COLA: Check whether the Constant OverLap Add (COLA) constraint
                is met
    check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
    welch: Power spectral density by Welch's method.
    spectrogram: Spectrogram by Welch's method.
    csd: Cross spectral density by Welch's method.
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data

    Notes
    -----
    In order to enable inversion of an STFT via the inverse STFT in
    `istft`, the signal windowing must obey the constraint of "Nonzero
    OverLap Add" (NOLA), and the input signal must have complete
    windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
    (nperseg-noverlap) == 0``). The `padded` argument may be used to
    accomplish this.

    Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop
    size :math:`H` = `nperseg - noverlap`, the windowed frame at time index
    :math:`t` is given by

    .. math:: x_{t}[n]=x[n]w[n-tH]

    The overlap-add (OLA) reconstruction equation is given by

    .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}

    The NOLA constraint ensures that every normalization term that appears
    in the denominator of the OLA reconstruction equation is nonzero. Whether a
    choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can
    be tested with `check_NOLA`.

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
           "Discrete-Time Signal Processing", Prentice Hall, 1999.
    .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
           Modified Short-Time Fourier Transform", IEEE 1984,
           10.1109/TASSP.1984.1164317

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()

    Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
    modulated around 3kHz, corrupted by white noise of exponentially
    decreasing magnitude sampled at 10 kHz.

    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 2 * np.sqrt(2)
    >>> noise_power = 0.01 * fs / 2
    >>> time = np.arange(N) / float(fs)
    >>> mod = 500*np.cos(2*np.pi*0.25*time)
    >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
    >>> noise = rng.normal(scale=np.sqrt(noise_power),
    ...                    size=time.shape)
    >>> noise *= np.exp(-time/5)
    >>> x = carrier + noise

    Compute and plot the STFT's magnitude.

    >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
    >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud')
    >>> plt.title('STFT Magnitude')
    >>> plt.ylabel('Frequency [Hz]')
    >>> plt.xlabel('Time [sec]')
    >>> plt.show()

    Compare the energy of the signal `x` with the energy of its STFT:

    >>> E_x = sum(x**2) / fs  # Energy of x
    >>> # Calculate a two-sided STFT with PSD scaling:
    >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000, return_onesided=False,
    ...                         scaling='psd')
    >>> # Integrate numerically over abs(Zxx)**2:
    >>> df, dt = f[1] - f[0], t[1] - t[0]
    >>> E_Zxx = sum(np.sum(Zxx.real**2 + Zxx.imag**2, axis=0) * df) * dt
    >>> # The energy is the same, but the numerical errors are quite large:
    >>> np.isclose(E_x, E_Zxx, rtol=1e-2)
    True

    """
    # Validate first so the error message carries the user's original value,
    # then map the user-facing 'psd' option onto the helper's 'density'
    # scaling.
    if scaling not in ('spectrum', 'psd'):
        raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!")
    if scaling == 'psd':
        scaling = 'density'

    return _spectral_helper(x, x, fs, window, nperseg, noverlap,
                            nfft, detrend, return_onesided,
                            scaling=scaling, axis=axis,
                            mode='stft', boundary=boundary,
                            padded=padded)
1437
+
1438
+
1439
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
          input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2,
          scaling='spectrum'):
    r"""Perform the inverse Short Time Fourier transform (legacy function).

    .. legacy:: function

        `ShortTimeFFT` is a newer STFT / ISTFT implementation with more
        features. A :ref:`comparison <tutorial_stft_legacy_stft>` between the
        implementations can be found in the :ref:`tutorial_stft` section of
        the :ref:`user_guide`.

    Parameters
    ----------
    Zxx : array_like
        STFT of the signal to be reconstructed. A purely real array is cast
        to a complex data type.
    fs : float, optional
        Sampling frequency of the time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. Strings/tuples are passed to `get_window`;
        an array_like is used directly and must have length ``nperseg``.
        Must match the window used to generate the STFT for faithful
        inversion. Defaults to a Hann window.
    nperseg : int, optional
        Number of data points per STFT segment. Must be specified if the
        segment length is odd or the STFT was padded via ``nfft > nperseg``.
        If `None`, inferred from ``Zxx.shape[freq_axis]`` and
        `input_onesided` (``2*(Zxx.shape[freq_axis] - 1)`` for one-sided
        input, ``Zxx.shape[freq_axis]`` otherwise). Defaults to `None`.
    noverlap : int, optional
        Number of points of overlap between segments; ``nperseg // 2`` when
        `None`. Must satisfy the NOLA constraint (see Notes) and match the
        value used to generate the STFT. Defaults to `None`.
    nfft : int, optional
        Number of FFT points per segment. Must be specified if the STFT was
        padded via ``nfft > nperseg``. If `None`, the defaults mirror those
        of `nperseg`, except that a one-sided input with
        ``nperseg == 2*Zxx.shape[freq_axis] - 1`` yields ``nfft = nperseg``
        (proper inversion of an odd-length unpadded STFT). Defaults to
        `None`.
    input_onesided : bool, optional
        If `True`, interpret the input as one-sided FFTs, as returned by
        `stft` with ``return_onesided=True``. Defaults to `True`.
    boundary : bool, optional
        Whether the input signal was extended at its boundaries via a
        non-`None` ``boundary`` argument to `stft`. Defaults to `True`.
    time_axis : int, optional
        Axis of `Zxx` holding the time segments; default is the last axis.
    freq_axis : int, optional
        Axis of `Zxx` holding the frequencies; default is the penultimate
        axis.
    scaling : {'spectrum', 'psd'}
        Scaling used when generating the STFT. Defaults to 'spectrum'.

    Returns
    -------
    t : ndarray
        Array of output data times.
    x : ndarray
        iSTFT of `Zxx`.

    Raises
    ------
    ValueError
        If `Zxx` has fewer than two dimensions, the time and frequency axes
        coincide, or the windowing parameters are inconsistent.

    See Also
    --------
    stft: Short Time Fourier Transform
    ShortTimeFFT: Newer STFT/ISTFT implementation providing more features.
    check_COLA: Check whether the Constant OverLap Add (COLA) constraint
                is met
    check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met

    Notes
    -----
    Inversion of an STFT requires the signal windowing to obey the
    "nonzero overlap add" (NOLA) constraint:

    .. math:: \sum_{t}w^{2}[n-tH] \ne 0

    which guarantees the normalization factors in the denominator of the
    overlap-add reconstruction equation

    .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}

    are not zero. The constraint can be checked with `check_NOLA`.

    An STFT which has been modified (via masking or otherwise) is not
    guaranteed to correspond to an exactly realizable signal. This function
    implements the iSTFT via the least-squares estimation algorithm of
    [2]_, which minimizes the mean squared error between the STFT of the
    returned signal and the modified STFT.

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
           "Discrete-Time Signal Processing", Prentice Hall, 1999.
    .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
           Modified Short-Time Fourier Transform", IEEE 1984,
           10.1109/TASSP.1984.1164317
    """
    # Promote the input to a complex ndarray so irfft/ifft get a uniform dtype.
    Zxx = np.asarray(Zxx) + 0j
    freq_axis = int(freq_axis)
    time_axis = int(time_axis)

    if Zxx.ndim < 2:
        raise ValueError('Input stft must be at least 2d!')
    if freq_axis == time_axis:
        raise ValueError('Must specify differing time and frequency axes!')

    nseg = Zxx.shape[time_axis]

    # Segment length implied by the number of frequency bins (assumes an
    # even segment length for one-sided input).
    if input_onesided:
        n_default = 2 * (Zxx.shape[freq_axis] - 1)
    else:
        n_default = Zxx.shape[freq_axis]

    if nperseg is None:
        nperseg = n_default
    else:
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    if nfft is None:
        # Odd nperseg with one-sided input means the STFT was not FFT-padded.
        if input_onesided and (nperseg == n_default + 1):
            nfft = nperseg
        else:
            nfft = n_default
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg // 2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap

    # Move frequency and time to the last two axes if not already there.
    if time_axis != Zxx.ndim - 1 or freq_axis != Zxx.ndim - 2:
        # Normalize negative indices for the transpose call below.
        if freq_axis < 0:
            freq_axis = Zxx.ndim + freq_axis
        if time_axis < 0:
            time_axis = Zxx.ndim + time_axis
        outer_axes = list(range(Zxx.ndim))
        for ax in sorted((time_axis, freq_axis), reverse=True):
            outer_axes.pop(ax)
        Zxx = np.transpose(Zxx, outer_axes + [freq_axis, time_axis])

    # Resolve the synthesis window as an array.
    if isinstance(window, str) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        win = np.asarray(window)
        if win.ndim != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] != nperseg:
            raise ValueError(f'window must have length of {nperseg}')

    inverse_fft = sp_fft.irfft if input_onesided else sp_fft.ifft
    xsubs = inverse_fft(Zxx, axis=-2, n=nfft)[..., :nperseg, :]

    # Buffers for the overlap-add reconstruction and its normalization.
    outputlength = nperseg + (nseg - 1) * nstep
    x = np.zeros(list(Zxx.shape[:-2]) + [outputlength], dtype=xsubs.dtype)
    norm = np.zeros(outputlength, dtype=xsubs.dtype)

    if np.result_type(win, xsubs) != xsubs.dtype:
        win = win.astype(xsubs.dtype)

    # Undo the scaling that was applied by the forward transform.
    if scaling == 'spectrum':
        xsubs *= win.sum()
    elif scaling == 'psd':
        xsubs *= np.sqrt(fs * sum(win**2))
    else:
        raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!")

    # Overlap-add the windowed inverse transforms.
    # This loop could perhaps be vectorized/strided somehow...
    for seg in range(nseg):
        start = seg * nstep
        x[..., start:start + nperseg] += xsubs[..., seg] * win
        norm[..., start:start + nperseg] += win**2

    # Strip the boundary extension added by `stft`, if any.
    if boundary:
        x = x[..., nperseg//2:-(nperseg//2)]
        norm = norm[..., nperseg//2:-(nperseg//2)]

    # Normalize, warning when the NOLA constraint is violated.
    if np.sum(norm > 1e-10) != len(norm):
        warnings.warn(
            "NOLA condition failed, STFT may not be invertible."
            + (" Possibly due to missing boundary" if not boundary else ""),
            stacklevel=2
        )
    x /= np.where(norm > 1e-10, norm, 1.0)

    if input_onesided:
        x = x.real

    # Restore the time axis to its requested position.
    if x.ndim > 1:
        if time_axis != Zxx.ndim - 1:
            if freq_axis < time_axis:
                time_axis -= 1
            x = np.moveaxis(x, -1, time_axis)

    time = np.arange(x.shape[0]) / float(fs)
    return time, x
1739
+
1740
+
1741
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
              nfft=None, detrend='constant', axis=-1):
    r"""
    Estimate the magnitude squared coherence estimate, Cxy, of
    discrete-time signals X and Y using Welch's method.

    ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
    spectral density estimates of X and Y, and `Pxy` is the cross
    spectral density estimate of X and Y.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    y : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. Strings/tuples are passed to `get_window`;
        an array_like is used directly and must have length `nperseg`.
        Defaults to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. A string is passed as the
        `type` argument to the `detrend` function; a function receives a
        segment and returns a detrended segment; `False` disables
        detrending. Defaults to 'constant'.
    axis : int, optional
        Axis along which the coherence is computed for both inputs; the
        default is over the last axis (i.e. ``axis=-1``).

    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Cxy : ndarray
        Magnitude squared coherence of x and y.

    See Also
    --------
    periodogram: Simple, optionally modified periodogram
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    welch: Power spectral density by Welch's method.
    csd: Cross spectral density by Welch's method.

    Notes
    -----
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default Hann window an overlap of
    50% is a reasonable trade off between accurately estimating the
    signal power, while not over counting any of the data. Narrower
    windows may require a larger overlap.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] P. Welch, "The use of the fast Fourier transform for the
           estimation of power spectra: A method based on time averaging
           over short, modified periodograms", IEEE Trans. Audio
           Electroacoust. vol. 15, pp. 70-73, 1967.
    .. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
           Signals" Prentice Hall, 2005

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()

    Generate two test signals with some common features.

    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 20
    >>> freq = 1234.0
    >>> noise_power = 0.001 * fs / 2
    >>> time = np.arange(N) / fs
    >>> b, a = signal.butter(2, 0.25, 'low')
    >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape)
    >>> y = signal.lfilter(b, a, x)
    >>> x += amp*np.sin(2*np.pi*freq*time)
    >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)

    Compute and plot the coherence.

    >>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
    >>> plt.semilogy(f, Cxy)
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('Coherence')
    >>> plt.show()

    """
    # All three spectral estimates share the same Welch parameters.
    spectral_kwargs = dict(fs=fs, window=window, nperseg=nperseg,
                           noverlap=noverlap, nfft=nfft, detrend=detrend,
                           axis=axis)
    freqs, Pxx = welch(x, **spectral_kwargs)
    _, Pyy = welch(y, **spectral_kwargs)
    _, Pxy = csd(x, y, **spectral_kwargs)

    Cxy = np.abs(Pxy)**2 / Pxx / Pyy

    return freqs, Cxy
1861
+
1862
+
1863
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                     nfft=None, detrend='constant', return_onesided=True,
                     scaling='density', axis=-1, mode='psd', boundary=None,
                     padded=False):
    """Calculate various forms of windowed FFTs for PSD, CSD, etc.

    This is a helper function that implements the commonality between
    the stft, psd, csd, and spectrogram functions. It is not designed to
    be called externally. The windows are not averaged over; the result
    from each window is returned.

    Parameters
    ----------
    x : array_like
        Array or sequence containing the data to be analyzed.
    y : array_like
        Array or sequence containing the data to be analyzed. If this is
        the same object in memory as `x` (i.e. ``_spectral_helper(x,
        x, ...)``), the extra computations are spared.
    fs : float, optional
        Sampling frequency of the time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If
        `False` return a two-sided spectrum. Defaults to `True`, but for
        complex data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the cross spectral density ('density')
        where `Pxy` has units of V**2/Hz and computing the cross
        spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
        and `y` are measured in V and `fs` is measured in Hz.
        Defaults to 'density'
    axis : int, optional
        Axis along which the FFTs are computed; the default is over the
        last axis (i.e. ``axis=-1``).
    mode: str {'psd', 'stft'}, optional
        Defines what kind of return values are expected. Defaults to
        'psd'.
    boundary : str or None, optional
        Specifies whether the input signal is extended at both ends, and
        how to generate the new values, in order to center the first
        windowed segment on the first input point. This has the benefit
        of enabling reconstruction of the first input point when the
        employed window function starts at zero. Valid options are
        ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
        `None`.
    padded : bool, optional
        Specifies whether the input signal is zero-padded at the end to
        make the signal fit exactly into an integer number of window
        segments, so that all of the signal is included in the output.
        Defaults to `False`. Padding occurs after boundary extension, if
        `boundary` is not `None`, and `padded` is `True`.

    Returns
    -------
    freqs : ndarray
        Array of sample frequencies.
    t : ndarray
        Array of times corresponding to each data segment
    result : ndarray
        Array of output data, contents dependent on *mode* kwarg.

    Notes
    -----
    Adapted from matplotlib.mlab

    .. versionadded:: 0.16.0
    """
    if mode not in ['psd', 'stft']:
        raise ValueError(f"Unknown value for mode {mode}, must be one of: "
                         "{'psd', 'stft'}")

    # Maps each boundary-extension keyword to its array-extension function.
    boundary_funcs = {'even': even_ext,
                      'odd': odd_ext,
                      'constant': const_ext,
                      'zeros': zero_ext,
                      None: None}

    if boundary not in boundary_funcs:
        raise ValueError(f"Unknown boundary option '{boundary}', "
                         f"must be one of: {list(boundary_funcs.keys())}")

    # If x and y are the same object we can save ourselves some computation.
    same_data = y is x

    if not same_data and mode != 'psd':
        raise ValueError("x and y must be equal if mode is 'stft'")

    axis = int(axis)

    # Ensure we have np.arrays, get outdtype
    # (outdtype is at least complex64; intermediate results are complex).
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
        outdtype = np.result_type(x, y, np.complex64)
    else:
        outdtype = np.result_type(x, np.complex64)

    if not same_data:
        # Check if we can broadcast the outer axes together
        xouter = list(x.shape)
        youter = list(y.shape)
        xouter.pop(axis)
        youter.pop(axis)
        try:
            outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
        except ValueError as e:
            raise ValueError('x and y cannot be broadcast together.') from e

    # Empty inputs short-circuit to (appropriately shaped) empty outputs.
    if same_data:
        if x.size == 0:
            return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
    else:
        if x.size == 0 or y.size == 0:
            outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
            emptyout = np.moveaxis(np.empty(outshape), -1, axis)
            return emptyout, emptyout, emptyout

    # Work on the last axis internally; moved back at the end.
    if x.ndim > 1:
        if axis != -1:
            x = np.moveaxis(x, axis, -1)
            if not same_data and y.ndim > 1:
                y = np.moveaxis(y, axis, -1)

    # Check if x and y are the same length, zero-pad if necessary
    if not same_data:
        if x.shape[-1] != y.shape[-1]:
            if x.shape[-1] < y.shape[-1]:
                pad_shape = list(x.shape)
                pad_shape[-1] = y.shape[-1] - x.shape[-1]
                x = np.concatenate((x, np.zeros(pad_shape)), -1)
            else:
                pad_shape = list(y.shape)
                pad_shape[-1] = x.shape[-1] - y.shape[-1]
                y = np.concatenate((y, np.zeros(pad_shape)), -1)

    if nperseg is not None:  # if specified by user
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    # parse window; if array like, then set nperseg = win.shape
    win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg//2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap

    # Padding occurs after boundary extension, so that the extended signal ends
    # in zeros, instead of introducing an impulse at the end.
    # I.e. if x = [..., 3, 2]
    # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
    # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]

    if boundary is not None:
        ext_func = boundary_funcs[boundary]
        x = ext_func(x, nperseg//2, axis=-1)
        if not same_data:
            y = ext_func(y, nperseg//2, axis=-1)

    if padded:
        # Pad to integer number of windowed segments
        # I.e. make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
        nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
        zeros_shape = list(x.shape[:-1]) + [nadd]
        x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
        if not same_data:
            zeros_shape = list(y.shape[:-1]) + [nadd]
            y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)

    # Handle detrending and window functions.
    # detrend_func always operates on the last axis of its argument.
    if not detrend:
        def detrend_func(d):
            return d
    elif not hasattr(detrend, '__call__'):
        def detrend_func(d):
            return _signaltools.detrend(d, type=detrend, axis=-1)
    elif axis != -1:
        # Wrap this function so that it receives a shape that it could
        # reasonably expect to receive.
        def detrend_func(d):
            d = np.moveaxis(d, -1, axis)
            d = detrend(d)
            return np.moveaxis(d, axis, -1)
    else:
        detrend_func = detrend

    if np.result_type(win, np.complex64) != outdtype:
        win = win.astype(outdtype)

    if scaling == 'density':
        scale = 1.0 / (fs * (win*win).sum())
    elif scaling == 'spectrum':
        scale = 1.0 / win.sum()**2
    else:
        raise ValueError(f'Unknown scaling: {scaling!r}')

    if mode == 'stft':
        # STFT returns amplitudes, not powers, so take the square root.
        scale = np.sqrt(scale)

    # Complex input forces a two-sided spectrum even if one-sided was asked.
    if return_onesided:
        if np.iscomplexobj(x):
            sides = 'twosided'
            warnings.warn('Input data is complex, switching to return_onesided=False',
                          stacklevel=3)
        else:
            sides = 'onesided'
            if not same_data:
                if np.iscomplexobj(y):
                    sides = 'twosided'
                    warnings.warn('Input data is complex, switching to '
                                  'return_onesided=False',
                                  stacklevel=3)
    else:
        sides = 'twosided'

    if sides == 'twosided':
        freqs = sp_fft.fftfreq(nfft, 1/fs)
    elif sides == 'onesided':
        freqs = sp_fft.rfftfreq(nfft, 1/fs)

    # Perform the windowed FFTs
    result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)

    if not same_data:
        # All the same operations on the y data
        result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
                               sides)
        result = np.conjugate(result) * result_y
    elif mode == 'psd':
        result = np.conjugate(result) * result

    result *= scale
    if sides == 'onesided' and mode == 'psd':
        if nfft % 2:
            result[..., 1:] *= 2
        else:
            # Last point is unpaired Nyquist freq point, don't double
            result[..., 1:-1] *= 2

    # Segment center times, shifted back when a boundary extension was added.
    time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
                     nperseg - noverlap)/float(fs)
    if boundary is not None:
        time -= (nperseg/2) / fs

    result = result.astype(outdtype)

    # All imaginary parts are zero anyways
    if same_data and mode != 'stft':
        result = result.real

    # Output is going to have new last axis for time/window index, so a
    # negative axis index shifts down one
    if axis < 0:
        axis -= 1

    # Roll frequency axis back to axis where the data came from
    result = np.moveaxis(result, -1, axis)

    return freqs, time, result
2156
+
2157
+
2158
+ def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
2159
+ """
2160
+ Calculate windowed FFT, for internal use by
2161
+ `scipy.signal._spectral_helper`.
2162
+
2163
+ This is a helper function that does the main FFT calculation for
2164
+ `_spectral helper`. All input validation is performed there, and the
2165
+ data axis is assumed to be the last axis of x. It is not designed to
2166
+ be called externally. The windows are not averaged over; the result
2167
+ from each window is returned.
2168
+
2169
+ Returns
2170
+ -------
2171
+ result : ndarray
2172
+ Array of FFT data
2173
+
2174
+ Notes
2175
+ -----
2176
+ Adapted from matplotlib.mlab
2177
+
2178
+ .. versionadded:: 0.16.0
2179
+ """
2180
+ # Created sliding window view of array
2181
+ if nperseg == 1 and noverlap == 0:
2182
+ result = x[..., np.newaxis]
2183
+ else:
2184
+ step = nperseg - noverlap
2185
+ result = np.lib.stride_tricks.sliding_window_view(
2186
+ x, window_shape=nperseg, axis=-1, writeable=True
2187
+ )
2188
+ result = result[..., 0::step, :]
2189
+
2190
+ # Detrend each data segment individually
2191
+ result = detrend_func(result)
2192
+
2193
+ # Apply window by multiplication
2194
+ result = win * result
2195
+
2196
+ # Perform the fft. Acts on last axis by default. Zero-pads automatically
2197
+ if sides == 'twosided':
2198
+ func = sp_fft.fft
2199
+ else:
2200
+ result = result.real
2201
+ func = sp_fft.rfft
2202
+ result = func(result, n=nfft)
2203
+
2204
+ return result
2205
+
2206
+
2207
+ def _triage_segments(window, nperseg, input_length):
2208
+ """
2209
+ Parses window and nperseg arguments for spectrogram and _spectral_helper.
2210
+ This is a helper function, not meant to be called externally.
2211
+
2212
+ Parameters
2213
+ ----------
2214
+ window : string, tuple, or ndarray
2215
+ If window is specified by a string or tuple and nperseg is not
2216
+ specified, nperseg is set to the default of 256 and returns a window of
2217
+ that length.
2218
+ If instead the window is array_like and nperseg is not specified, then
2219
+ nperseg is set to the length of the window. A ValueError is raised if
2220
+ the user supplies both an array_like window and a value for nperseg but
2221
+ nperseg does not equal the length of the window.
2222
+
2223
+ nperseg : int
2224
+ Length of each segment
2225
+
2226
+ input_length: int
2227
+ Length of input signal, i.e. x.shape[-1]. Used to test for errors.
2228
+
2229
+ Returns
2230
+ -------
2231
+ win : ndarray
2232
+ window. If function was called with string or tuple than this will hold
2233
+ the actual array used as a window.
2234
+
2235
+ nperseg : int
2236
+ Length of each segment. If window is str or tuple, nperseg is set to
2237
+ 256. If window is array_like, nperseg is set to the length of the
2238
+ window.
2239
+ """
2240
+ # parse window; if array like, then set nperseg = win.shape
2241
+ if isinstance(window, str) or isinstance(window, tuple):
2242
+ # if nperseg not specified
2243
+ if nperseg is None:
2244
+ nperseg = 256 # then change to default
2245
+ if nperseg > input_length:
2246
+ warnings.warn(f'nperseg = {nperseg:d} is greater than input length '
2247
+ f' = {input_length:d}, using nperseg = {input_length:d}',
2248
+ stacklevel=3)
2249
+ nperseg = input_length
2250
+ win = get_window(window, nperseg)
2251
+ else:
2252
+ win = np.asarray(window)
2253
+ if len(win.shape) != 1:
2254
+ raise ValueError('window must be 1-D')
2255
+ if input_length < win.shape[-1]:
2256
+ raise ValueError('window is longer than input signal')
2257
+ if nperseg is None:
2258
+ nperseg = win.shape[0]
2259
+ elif nperseg is not None:
2260
+ if nperseg != win.shape[0]:
2261
+ raise ValueError("value specified for nperseg is different"
2262
+ " from length of window")
2263
+ return win, nperseg
2264
+
2265
+
2266
+ def _median_bias(n):
2267
+ """
2268
+ Returns the bias of the median of a set of periodograms relative to
2269
+ the mean.
2270
+
2271
+ See Appendix B from [1]_ for details.
2272
+
2273
+ Parameters
2274
+ ----------
2275
+ n : int
2276
+ Numbers of periodograms being averaged.
2277
+
2278
+ Returns
2279
+ -------
2280
+ bias : float
2281
+ Calculated bias.
2282
+
2283
+ References
2284
+ ----------
2285
+ .. [1] B. Allen, W.G. Anderson, P.R. Brady, D.A. Brown, J.D.E. Creighton.
2286
+ "FINDCHIRP: an algorithm for detection of gravitational waves from
2287
+ inspiraling compact binaries", Physical Review D 85, 2012,
2288
+ :arxiv:`gr-qc/0509116`
2289
+ """
2290
+ ii_2 = 2 * np.arange(1., (n-1) // 2 + 1)
2291
+ return 1 + np.sum(1. / (ii_2 + 1) - 1. / ii_2)
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (55.9 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline.pyi ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
# Type stubs for the compiled extension module ``scipy.signal._spline``
# (see the adjacent ``_spline.cpython-*.so``).  Only signatures live here;
# the implementations are in C.

import numpy as np
from numpy.typing import NDArray

# Real floating-point arrays accepted/returned by the routines below.
FloatingArray = NDArray[np.float32] | NDArray[np.float64]
# Complex arrays (single and double precision).
ComplexArray = NDArray[np.complex64] | NDArray[np.complex128]
# Union used where both real and complex inputs are supported.
FloatingComplexArray = FloatingArray | ComplexArray


# Initial conditions for the first-order symmetric IIR filter.
# NOTE(review): the Python caller ``symiirorder1`` in _spline_filters.py
# invokes this as ``symiirorder1_ic(signal, z1, precision)`` (three
# arguments) while this stub declares four — confirm which arity the C
# implementation actually exposes.
def symiirorder1_ic(signal: FloatingComplexArray,
                    c0: float,
                    z1: float,
                    precision: float) -> FloatingComplexArray:
    ...


# Forward (causal) starting conditions for the second-order symmetric
# IIR filter with pole radius ``r`` and angle ``omega``.
def symiirorder2_ic_fwd(signal: FloatingArray,
                        r: float,
                        omega: float,
                        precision: float) -> FloatingArray:
    ...


# Backward (anti-causal) starting conditions for the second-order
# symmetric IIR filter.
def symiirorder2_ic_bwd(signal: FloatingArray,
                        r: float,
                        omega: float,
                        precision: float) -> FloatingArray:
    ...


# Separable 2-D FIR filtering with mirror-symmetric boundary handling:
# ``hrow`` is applied along rows and ``hcol`` along columns.
def sepfir2d(input: FloatingComplexArray,
             hrow: FloatingComplexArray,
             hcol: FloatingComplexArray) -> FloatingComplexArray:
    ...
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_spline_filters.py ADDED
@@ -0,0 +1,808 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy import (asarray, pi, zeros_like,
2
+ array, arctan2, tan, ones, arange, floor,
3
+ r_, atleast_1d, sqrt, exp, greater, cos, add, sin,
4
+ moveaxis, abs, arctan, complex64, float32)
5
+ import numpy as np
6
+
7
+ from scipy._lib._util import normalize_axis_index
8
+
9
+ # From splinemodule.c
10
+ from ._spline import sepfir2d, symiirorder1_ic, symiirorder2_ic_fwd, symiirorder2_ic_bwd
11
+ from ._signaltools import lfilter, sosfilt, lfiltic
12
+ from ._arraytools import axis_slice, axis_reverse
13
+
14
+ from scipy.interpolate import BSpline
15
+
16
+
17
+ __all__ = ['spline_filter', 'gauss_spline',
18
+ 'cspline1d', 'qspline1d', 'qspline2d', 'cspline2d',
19
+ 'cspline1d_eval', 'qspline1d_eval', 'symiirorder1', 'symiirorder2']
20
+
21
+
22
+ def spline_filter(Iin, lmbda=5.0):
23
+ """Smoothing spline (cubic) filtering of a rank-2 array.
24
+
25
+ Filter an input data set, `Iin`, using a (cubic) smoothing spline of
26
+ fall-off `lmbda`.
27
+
28
+ Parameters
29
+ ----------
30
+ Iin : array_like
31
+ input data set
32
+ lmbda : float, optional
33
+ spline smoothing fall-off value, default is `5.0`.
34
+
35
+ Returns
36
+ -------
37
+ res : ndarray
38
+ filtered input data
39
+
40
+ Examples
41
+ --------
42
+ We can filter an multi dimensional signal (ex: 2D image) using cubic
43
+ B-spline filter:
44
+
45
+ >>> import numpy as np
46
+ >>> from scipy.signal import spline_filter
47
+ >>> import matplotlib.pyplot as plt
48
+ >>> orig_img = np.eye(20) # create an image
49
+ >>> orig_img[10, :] = 1.0
50
+ >>> sp_filter = spline_filter(orig_img, lmbda=0.1)
51
+ >>> f, ax = plt.subplots(1, 2, sharex=True)
52
+ >>> for ind, data in enumerate([[orig_img, "original image"],
53
+ ... [sp_filter, "spline filter"]]):
54
+ ... ax[ind].imshow(data[0], cmap='gray_r')
55
+ ... ax[ind].set_title(data[1])
56
+ >>> plt.tight_layout()
57
+ >>> plt.show()
58
+
59
+ """
60
+ if Iin.dtype not in [np.float32, np.float64, np.complex64, np.complex128]:
61
+ raise TypeError(f"Invalid data type for Iin: {Iin.dtype = }")
62
+
63
+ # XXX: note that complex-valued computations are done in single precision
64
+ # this is historic, and the root reason is unclear,
65
+ # see https://github.com/scipy/scipy/issues/9209
66
+ # Attempting to work in complex double precision leads to symiirorder1
67
+ # failing to converge for the boundary conditions.
68
+ intype = Iin.dtype
69
+ hcol = array([1.0, 4.0, 1.0], np.float32) / 6.0
70
+ if intype == np.complex128:
71
+ Iin = Iin.astype(np.complex64)
72
+
73
+ ck = cspline2d(Iin, lmbda)
74
+ out = sepfir2d(ck, hcol, hcol)
75
+ out = out.astype(intype)
76
+ return out
77
+
78
+
79
# Legacy module-level cache; nothing in the visible code reads or writes it.
# NOTE(review): presumably kept for backward compatibility — confirm no
# external users before removing.
_splinefunc_cache = {}
80
+
81
+
82
def gauss_spline(x, n):
    r"""Gaussian approximation to B-spline basis function of order n.

    Parameters
    ----------
    x : array_like
        a knot vector
    n : int
        The order of the spline. Must be non-negative, i.e., n >= 0

    Returns
    -------
    res : ndarray
        B-spline basis function values approximated by a zero-mean Gaussian
        function.

    Notes
    -----
    The B-spline basis function can be approximated well by a zero-mean
    Gaussian function with variance :math:`\sigma^2=(n+1)/12` for large `n`:

    .. math::  \frac{1}{\sqrt {2\pi\sigma^2}}exp(-\frac{x^2}{2\sigma^2})

    References
    ----------
    .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
       F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines.
       In: Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
       Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
       Science, vol 4485. Springer, Berlin, Heidelberg

    Examples
    --------
    We can calculate B-Spline basis functions approximated by a gaussian
    distribution:

    >>> import numpy as np
    >>> from scipy.signal import gauss_spline
    >>> knots = np.array([-1.0, 0.0, -1.0])
    >>> gauss_spline(knots, 3)
    array([0.15418033, 0.6909883, 0.15418033])  # may vary

    """
    knots = np.asarray(x)
    # Variance of the approximating Gaussian.
    variance = (n + 1) / 12.0
    normalizer = 1 / np.sqrt(2 * np.pi * variance)
    return normalizer * np.exp(-knots ** 2 / 2 / variance)
130
+
131
+
132
+ def _cubic(x):
133
+ x = asarray(x, dtype=float)
134
+ b = BSpline.basis_element([-2, -1, 0, 1, 2], extrapolate=False)
135
+ out = b(x)
136
+ out[(x < -2) | (x > 2)] = 0
137
+ return out
138
+
139
+
140
+ def _quadratic(x):
141
+ x = abs(asarray(x, dtype=float))
142
+ b = BSpline.basis_element([-1.5, -0.5, 0.5, 1.5], extrapolate=False)
143
+ out = b(x)
144
+ out[(x < -1.5) | (x > 1.5)] = 0
145
+ return out
146
+
147
+
148
+ def _coeff_smooth(lam):
149
+ xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
150
+ omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
151
+ rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
152
+ rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
153
+ return rho, omeg
154
+
155
+
156
+ def _hc(k, cs, rho, omega):
157
+ return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
158
+ greater(k, -1))
159
+
160
+
161
+ def _hs(k, cs, rho, omega):
162
+ c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
163
+ (1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
164
+ gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
165
+ ak = abs(k)
166
+ return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
167
+
168
+
169
def _cubic_smooth_coeff(signal, lamb):
    """Cubic smoothing-spline coefficients of a 1-D signal.

    Runs a causal then an anti-causal second-order recursive section with
    mirror-symmetric boundary handling.  The first two outputs of each pass
    are seeded from infinite sums of the filter's impulse response against
    the (mirrored) input, computed via ``_hc`` / ``_hs``.
    """
    rho, omega = _coeff_smooth(lamb)
    # Gain of one second-order section.
    cs = 1 - 2 * rho * cos(omega) + rho * rho
    K = len(signal)
    k = arange(K)

    # Seed values y[0], y[1] for the forward pass: truncated-at-K sums of
    # the causal impulse response against the mirrored signal.
    zi_2 = (_hc(0, cs, rho, omega) * signal[0] +
            add.reduce(_hc(k + 1, cs, rho, omega) * signal))
    zi_1 = (_hc(0, cs, rho, omega) * signal[0] +
            _hc(1, cs, rho, omega) * signal[1] +
            add.reduce(_hc(k + 2, cs, rho, omega) * signal))

    # Forward filter:
    # for n in range(2, K):
    #     yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
    #              rho * rho * yp[n - 2])
    # lfiltic converts the two seed outputs into lfilter/sosfilt state.
    zi = lfiltic(cs, r_[1, -2 * rho * cos(omega), rho * rho], r_[zi_1, zi_2])
    zi = zi.reshape(1, -1)

    # Single second-order section: b = [cs, 0, 0], a = [1, -2ρcosω, ρ²].
    sos = r_[cs, 0, 0, 1, -2 * rho * cos(omega), rho * rho]
    sos = sos.reshape(1, -1)

    yp, _ = sosfilt(sos, signal[2:], zi=zi)
    yp = r_[zi_2, zi_1, yp]

    # Reverse filter:
    # for n in range(K - 3, -1, -1):
    #     y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
    #             rho * rho * y[n + 2])

    # Seed values for the backward pass use the symmetric response ``_hs``
    # against the reversed signal.
    zi_2 = add.reduce((_hs(k, cs, rho, omega) +
                       _hs(k + 1, cs, rho, omega)) * signal[::-1])
    zi_1 = add.reduce((_hs(k - 1, cs, rho, omega) +
                       _hs(k + 2, cs, rho, omega)) * signal[::-1])

    zi = lfiltic(cs, r_[1, -2 * rho * cos(omega), rho * rho], r_[zi_1, zi_2])
    zi = zi.reshape(1, -1)
    # Run the same section over the reversed forward output, then restore
    # natural order and append the two backward seeds.
    y, _ = sosfilt(sos, yp[-3::-1], zi=zi)
    y = r_[y[::-1], zi_1, zi_2]
    return y
209
+
210
+
211
+ def _cubic_coeff(signal):
212
+ zi = -2 + sqrt(3)
213
+ K = len(signal)
214
+ powers = zi ** arange(K)
215
+
216
+ if K == 1:
217
+ yplus = signal[0] + zi * add.reduce(powers * signal)
218
+ output = zi / (zi - 1) * yplus
219
+ return atleast_1d(output)
220
+
221
+ # Forward filter:
222
+ # yplus[0] = signal[0] + zi * add.reduce(powers * signal)
223
+ # for k in range(1, K):
224
+ # yplus[k] = signal[k] + zi * yplus[k - 1]
225
+
226
+ state = lfiltic(1, r_[1, -zi], atleast_1d(add.reduce(powers * signal)))
227
+
228
+ b = ones(1)
229
+ a = r_[1, -zi]
230
+ yplus, _ = lfilter(b, a, signal, zi=state)
231
+
232
+ # Reverse filter:
233
+ # output[K - 1] = zi / (zi - 1) * yplus[K - 1]
234
+ # for k in range(K - 2, -1, -1):
235
+ # output[k] = zi * (output[k + 1] - yplus[k])
236
+ out_last = zi / (zi - 1) * yplus[K - 1]
237
+ state = lfiltic(-zi, r_[1, -zi], atleast_1d(out_last))
238
+
239
+ b = asarray([-zi])
240
+ output, _ = lfilter(b, a, yplus[-2::-1], zi=state)
241
+ output = r_[output[::-1], out_last]
242
+ return output * 6.0
243
+
244
+
245
+ def _quadratic_coeff(signal):
246
+ zi = -3 + 2 * sqrt(2.0)
247
+ K = len(signal)
248
+ powers = zi ** arange(K)
249
+
250
+ if K == 1:
251
+ yplus = signal[0] + zi * add.reduce(powers * signal)
252
+ output = zi / (zi - 1) * yplus
253
+ return atleast_1d(output)
254
+
255
+ # Forward filter:
256
+ # yplus[0] = signal[0] + zi * add.reduce(powers * signal)
257
+ # for k in range(1, K):
258
+ # yplus[k] = signal[k] + zi * yplus[k - 1]
259
+
260
+ state = lfiltic(1, r_[1, -zi], atleast_1d(add.reduce(powers * signal)))
261
+
262
+ b = ones(1)
263
+ a = r_[1, -zi]
264
+ yplus, _ = lfilter(b, a, signal, zi=state)
265
+
266
+ # Reverse filter:
267
+ # output[K - 1] = zi / (zi - 1) * yplus[K - 1]
268
+ # for k in range(K - 2, -1, -1):
269
+ # output[k] = zi * (output[k + 1] - yplus[k])
270
+ out_last = zi / (zi - 1) * yplus[K - 1]
271
+ state = lfiltic(-zi, r_[1, -zi], atleast_1d(out_last))
272
+
273
+ b = asarray([-zi])
274
+ output, _ = lfilter(b, a, yplus[-2::-1], zi=state)
275
+ output = r_[output[::-1], out_last]
276
+ return output * 8.0
277
+
278
+
279
def compute_root_from_lambda(lamb):
    """Pole radius ``r`` and angle ``omega`` of the cubic smoothing-spline
    prefilter for smoothing parameter ``lamb`` (assumes ``144*lamb > 1``)."""
    aux = sqrt(3 + 144 * lamb)
    xi = 1 - 96 * lamb + 24 * lamb * aux
    omega = arctan(sqrt((144 * lamb - 1.0) / xi))
    sqrt_xi = sqrt(xi)
    magnitude = sqrt(48 * lamb + 24 * lamb * aux) / sqrt_xi
    r = (24 * lamb - 1 - sqrt_xi) / (24 * lamb) * magnitude
    return r, omega
287
+
288
+
289
def cspline1d(signal, lamb=0.0):
    """
    Compute cubic spline coefficients for rank-1 array.

    Find the cubic spline coefficients for a 1-D signal assuming
    mirror-symmetric boundary conditions. To obtain the signal back from the
    spline representation mirror-symmetric-convolve these coefficients with a
    length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient, default is 0.0.

    Returns
    -------
    c : ndarray
        Cubic spline coefficients.

    See Also
    --------
    cspline1d_eval : Evaluate a cubic spline at the new set of points.

    Examples
    --------
    We can filter a signal to reduce and smooth out high-frequency noise with
    a cubic spline:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.signal import cspline1d, cspline1d_eval
    >>> rng = np.random.default_rng()
    >>> sig = np.repeat([0., 1., 0.], 100)
    >>> sig += rng.standard_normal(len(sig))*0.05  # add noise
    >>> time = np.linspace(0, len(sig))
    >>> filtered = cspline1d_eval(cspline1d(sig), time)
    >>> plt.plot(sig, label="signal")
    >>> plt.plot(time, filtered, label="filtered")
    >>> plt.legend()
    >>> plt.show()

    """
    # Zero smoothing means exact interpolation; otherwise run the
    # smoothing-spline prefilter.
    if lamb == 0.0:
        return _cubic_coeff(signal)
    return _cubic_smooth_coeff(signal, lamb)
337
+
338
+
339
def qspline1d(signal, lamb=0.0):
    """Compute quadratic spline coefficients for rank-1 array.

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient (must be zero for now).

    Returns
    -------
    c : ndarray
        Quadratic spline coefficients.

    See Also
    --------
    qspline1d_eval : Evaluate a quadratic spline at the new set of points.

    Notes
    -----
    Find the quadratic spline coefficients for a 1-D signal assuming
    mirror-symmetric boundary conditions. To obtain the signal back from the
    spline representation mirror-symmetric-convolve these coefficients with a
    length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .

    Examples
    --------
    We can filter a signal to reduce and smooth out high-frequency noise with
    a quadratic spline:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.signal import qspline1d, qspline1d_eval
    >>> rng = np.random.default_rng()
    >>> sig = np.repeat([0., 1., 0.], 100)
    >>> sig += rng.standard_normal(len(sig))*0.05  # add noise
    >>> time = np.linspace(0, len(sig))
    >>> filtered = qspline1d_eval(qspline1d(sig), time)
    >>> plt.plot(sig, label="signal")
    >>> plt.plot(time, filtered, label="filtered")
    >>> plt.legend()
    >>> plt.show()

    """
    # Guard clause: smoothing quadratic splines are not implemented.
    if lamb != 0.0:
        raise ValueError("Smoothing quadratic splines not supported yet.")
    return _quadratic_coeff(signal)
388
+
389
+
390
def collapse_2d(x, axis):
    """Move ``axis`` to the end and flatten all leading axes into one,
    returning the 2-D view/copy and the intermediate (moved-axis) shape."""
    moved = moveaxis(x, axis, -1)
    moved_shape = moved.shape
    flat = moved.reshape(-1, moved_shape[-1])
    # Downstream C routines require C-contiguous rows.
    if not flat.flags.c_contiguous:
        flat = flat.copy()
    return flat, moved_shape
397
+
398
+
399
+ def symiirorder_nd(func, input, *args, axis=-1, **kwargs):
400
+ axis = normalize_axis_index(axis, input.ndim)
401
+ input_shape = input.shape
402
+ input_ndim = input.ndim
403
+ if input.ndim > 1:
404
+ input, input_shape = collapse_2d(input, axis)
405
+
406
+ out = func(input, *args, **kwargs)
407
+
408
+ if input_ndim > 1:
409
+ out = out.reshape(input_shape)
410
+ out = moveaxis(out, -1, axis)
411
+ if not out.flags.c_contiguous:
412
+ out = out.copy()
413
+ return out
414
+
415
+
416
def qspline2d(signal, lamb=0.0, precision=-1.0):
    """
    Coefficients for 2-D quadratic (2nd order) B-spline.

    Return the second-order B-spline coefficients over a regularly spaced
    input grid for the two-dimensional input image.

    Parameters
    ----------
    signal : ndarray
        The input signal.
    lamb : float
        Specifies the amount of smoothing in the transfer function.
    precision : float
        Specifies the precision for computing the infinite sum needed to apply
        mirror-symmetric boundary conditions.

    Returns
    -------
    output : ndarray
        The filtered signal.
    """
    # Out-of-range precision: pick a default appropriate to the dtype.
    if not (0.0 <= precision < 1.0):
        precision = 1e-3 if signal.dtype in [float32, complex64] else 1e-6

    if lamb > 0:
        raise ValueError('lambda must be negative or zero')

    # Normal quadratic spline: a single real pole.
    pole = -3 + 2 * sqrt(2.0)
    gain = -pole * 8.0

    # Separable filtering: last axis, then first axis.
    out = symiirorder_nd(symiirorder1, signal, gain, pole, precision, axis=-1)
    out = symiirorder_nd(symiirorder1, out, gain, pole, precision, axis=0)
    return out
455
+
456
+
457
def cspline2d(signal, lamb=0.0, precision=-1.0):
    """
    Coefficients for 2-D cubic (3rd order) B-spline.

    Return the third-order B-spline coefficients over a regularly spaced
    input grid for the two-dimensional input image.

    Parameters
    ----------
    signal : ndarray
        The input signal.
    lamb : float
        Specifies the amount of smoothing in the transfer function.
    precision : float
        Specifies the precision for computing the infinite sum needed to apply
        mirror-symmetric boundary conditions.

    Returns
    -------
    output : ndarray
        The filtered signal.
    """
    # Out-of-range precision: pick a default appropriate to the dtype.
    if not (0.0 <= precision < 1.0):
        precision = 1e-3 if signal.dtype in [float32, complex64] else 1e-6

    if lamb <= 1 / 144.0:
        # Normal (non-smoothing) cubic spline: a single real pole, applied
        # separably along the last axis and then the first.
        pole = -2 + sqrt(3.0)
        out = symiirorder_nd(
            symiirorder1, signal, -pole * 6.0, pole, precision=precision, axis=-1)
        out = symiirorder_nd(
            symiirorder1, out, -pole * 6.0, pole, precision=precision, axis=0)
        return out

    # Smoothing spline: a complex pole pair (radius r, angle omega).
    r, omega = compute_root_from_lambda(lamb)
    out = symiirorder_nd(symiirorder2, signal, r, omega,
                         precision=precision, axis=-1)
    out = symiirorder_nd(symiirorder2, out, r, omega,
                         precision=precision, axis=0)
    return out
500
+
501
+
502
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a cubic spline at the new set of points.

    `dx` is the old sample-spacing while `x0` was the old origin. In
    other-words the old-sample points (knot-points) for which the `cj`
    represent spline coefficients were at equally-spaced points of:

      oldx = x0 + j*dx  j=0...N-1, with N=len(cj)

    Edges are handled using mirror-symmetric boundary conditions.

    Parameters
    ----------
    cj : ndarray
        cublic spline coefficients
    newx : ndarray
        New set of points.
    dx : float, optional
        Old sample-spacing, the default value is 1.0.
    x0 : int, optional
        Old origin, the default value is 0.

    Returns
    -------
    res : ndarray
        Evaluated a cubic spline points.

    See Also
    --------
    cspline1d : Compute cubic spline coefficients for rank-1 array.

    Examples
    --------
    We can filter a signal to reduce and smooth out high-frequency noise with
    a cubic spline:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.signal import cspline1d, cspline1d_eval
    >>> rng = np.random.default_rng()
    >>> sig = np.repeat([0., 1., 0.], 100)
    >>> sig += rng.standard_normal(len(sig))*0.05  # add noise
    >>> time = np.linspace(0, len(sig))
    >>> filtered = cspline1d_eval(cspline1d(sig), time)
    >>> plt.plot(sig, label="signal")
    >>> plt.plot(time, filtered, label="filtered")
    >>> plt.legend()
    >>> plt.show()

    """
    # Map the evaluation points into knot coordinates.
    pts = (asarray(newx) - x0) / float(dx)
    res = zeros_like(pts, dtype=cj.dtype)
    if res.size == 0:
        return res
    N = len(cj)
    below = pts < 0
    above = pts > (N - 1)
    inside = ~(below | above)
    # Mirror-symmetric extension: reflect out-of-range points back into
    # [0, N-1] and evaluate recursively.
    res[below] = cspline1d_eval(cj, -pts[below])
    res[above] = cspline1d_eval(cj, 2 * (N - 1) - pts[above])
    pts = pts[inside]
    if pts.size == 0:
        return res
    acc = zeros_like(pts, dtype=cj.dtype)
    # The cubic basis has support width 4: accumulate the four nearest knots.
    jlow = floor(pts - 2).astype(int) + 1
    for offset in range(4):
        j = jlow + offset
        acc += cj[j.clip(0, N - 1)] * _cubic(pts - j)
    res[inside] = acc
    return res
574
+
575
+
576
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a quadratic spline at the new set of points.

    Parameters
    ----------
    cj : ndarray
        Quadratic spline coefficients
    newx : ndarray
        New set of points.
    dx : float, optional
        Old sample-spacing, the default value is 1.0.
    x0 : int, optional
        Old origin, the default value is 0.

    Returns
    -------
    res : ndarray
        Evaluated a quadratic spline points.

    See Also
    --------
    qspline1d : Compute quadratic spline coefficients for rank-1 array.

    Notes
    -----
    `dx` is the old sample-spacing while `x0` was the old origin. In
    other-words the old-sample points (knot-points) for which the `cj`
    represent spline coefficients were at equally-spaced points of::

      oldx = x0 + j*dx  j=0...N-1, with N=len(cj)

    Edges are handled using mirror-symmetric boundary conditions.

    Examples
    --------
    We can filter a signal to reduce and smooth out high-frequency noise with
    a quadratic spline:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.signal import qspline1d, qspline1d_eval
    >>> rng = np.random.default_rng()
    >>> sig = np.repeat([0., 1., 0.], 100)
    >>> sig += rng.standard_normal(len(sig))*0.05  # add noise
    >>> time = np.linspace(0, len(sig))
    >>> filtered = qspline1d_eval(qspline1d(sig), time)
    >>> plt.plot(sig, label="signal")
    >>> plt.plot(time, filtered, label="filtered")
    >>> plt.legend()
    >>> plt.show()

    """
    # Map the evaluation points into knot coordinates.
    pts = (asarray(newx) - x0) / dx
    res = zeros_like(pts)
    if res.size == 0:
        return res
    N = len(cj)
    below = pts < 0
    above = pts > (N - 1)
    inside = ~(below | above)
    # Mirror-symmetric extension: reflect out-of-range points back into
    # [0, N-1] and evaluate recursively.
    res[below] = qspline1d_eval(cj, -pts[below])
    res[above] = qspline1d_eval(cj, 2 * (N - 1) - pts[above])
    pts = pts[inside]
    if pts.size == 0:
        return res
    acc = zeros_like(pts)
    # The quadratic basis has support width 3: accumulate the three
    # nearest knots.
    jlow = floor(pts - 1.5).astype(int) + 1
    for offset in range(3):
        j = jlow + offset
        acc += cj[j.clip(0, N - 1)] * _quadratic(pts - j)
    res[inside] = acc
    return res
650
+
651
+
652
def symiirorder1(signal, c0, z1, precision=-1.0):
    """
    Implement a smoothing IIR filter with mirror-symmetric boundary conditions
    using a cascade of first-order sections.

    The second section uses a reversed sequence. This implements a system with
    the following transfer function and mirror-symmetric boundary conditions::

                     c0
      H(z) = ---------------------
             (1-z1/z) (1 - z1 z)

    The resulting signal will have mirror symmetric boundary conditions
    as well.

    Parameters
    ----------
    signal : ndarray
        The input signal. If 2D, then the filter will be applied in a batched
        fashion across the last axis.
    c0, z1 : scalar
        Parameters in the transfer function.
    precision :
        Specifies the precision for calculating initial conditions
        of the recursive filter based on mirror-symmetric input.

    Returns
    -------
    output : ndarray
        The filtered signal.
    """
    # The cascade is only stable for a pole strictly inside the unit circle.
    if np.abs(z1) >= 1:
        raise ValueError('|z1| must be less than 1.0')

    if signal.ndim > 2:
        raise ValueError('Input must be 1D or 2D')

    # Normalize to 2D (batch of rows); remember to squeeze back at the end.
    squeeze_dim = False
    if signal.ndim == 1:
        signal = signal[None, :]
        squeeze_dim = True

    # Integer inputs are promoted to a floating type before filtering.
    if np.issubdtype(signal.dtype, np.integer):
        signal = signal.astype(np.promote_types(signal.dtype, np.float32))

    # Initial output of the causal section, computed in C from the
    # mirror-symmetric infinite sum up to `precision`.
    # NOTE(review): the _spline.pyi stub declares symiirorder1_ic with a
    # ``c0`` argument before ``z1`` — confirm this 3-argument call matches
    # the compiled extension's actual signature.
    y0 = symiirorder1_ic(signal, z1, precision)

    # Apply first the system 1 / (1 - z1 * z^-1)
    b = np.ones(1, dtype=signal.dtype)
    a = np.r_[1, -z1]
    a = a.astype(signal.dtype)

    # Compute the initial state for lfilter.
    zii = y0 * z1

    y1, _ = lfilter(b, a, axis_slice(signal, 1), zi=zii)
    # Prepend the seed sample to the filtered remainder of each row.
    y1 = np.c_[y0, y1]

    # Compute backward symmetric condition and apply the system
    # c0 / (1 - z1 * z)
    b = np.asarray([c0], dtype=signal.dtype)
    # Closed-form steady-state value for the anti-causal section's last sample.
    out_last = -c0 / (z1 - 1.0) * axis_slice(y1, -1)

    # Compute the initial state for lfilter.
    zii = out_last * z1

    # Apply the system c0 / (1 - z1 * z) by reversing the output of the previous stage
    out, _ = lfilter(b, a, axis_slice(y1, -2, step=-1), zi=zii)
    out = np.c_[axis_reverse(out), out_last]

    if squeeze_dim:
        out = out[0]

    return out
726
+
727
+
728
def symiirorder2(input, r, omega, precision=-1.0):
    """
    Implement a smoothing IIR filter with mirror-symmetric boundary conditions
    using a cascade of second-order sections.

    The second section uses a reversed sequence. This implements the following
    transfer function::

                                  cs^2
      H(z) = ---------------------------------------
             (1 - a2/z - a3/z^2) (1 - a2 z - a3 z^2 )

    where::

          a2 = 2 * r * cos(omega)
          a3 = - r ** 2
          cs = 1 - 2 * r * cos(omega) + r ** 2

    Parameters
    ----------
    input : ndarray
        The input signal.
    r, omega : float
        Parameters in the transfer function.
    precision : float
        Specifies the precision for calculating initial conditions
        of the recursive filter based on mirror-symmetric input.

    Returns
    -------
    output : ndarray
        The filtered signal.
    """
    # Pole radius must be inside the unit circle for stability.
    if r >= 1.0:
        raise ValueError('r must be less than 1.0')

    if input.ndim > 2:
        raise ValueError('Input must be 1D or 2D')

    # The C initial-condition helpers require contiguous input.
    if not input.flags.c_contiguous:
        input = input.copy()

    # Normalize to 2D (batch of rows); remember to squeeze back at the end.
    squeeze_dim = False
    if input.ndim == 1:
        input = input[None, :]
        squeeze_dim = True

    # Integer inputs are promoted to a floating type before filtering.
    if np.issubdtype(input.dtype, np.integer):
        input = input.astype(np.promote_types(input.dtype, np.float32))

    # Second-order section coefficients from the pole pair (r, omega).
    rsq = r * r
    a2 = 2 * r * np.cos(omega)
    a3 = -rsq
    cs = np.atleast_1d(1 - 2 * r * np.cos(omega) + rsq)
    sos = np.atleast_2d(np.r_[cs, 0, 0, 1, -a2, -a3]).astype(input.dtype)

    # Find the starting (forward) conditions.
    ic_fwd = symiirorder2_ic_fwd(input, r, omega, precision)

    # Apply first the system cs / (1 - a2 * z^-1 - a3 * z^-2)
    # Compute the initial conditions in the form expected by sosfilt
    # coef = np.asarray([[a3, a2], [0, a3]], dtype=input.dtype)
    coef = np.r_[a3, a2, 0, a3].reshape(2, 2).astype(input.dtype)
    # Batched 2x2 matrix-vector product: one state pair per row.
    zi = np.matmul(coef, ic_fwd[:, :, None])[:, :, 0]

    # Filter the remainder of each row, then prepend the two seed samples.
    y_fwd, _ = sosfilt(sos, axis_slice(input, 2), zi=zi[None])
    y_fwd = np.c_[ic_fwd, y_fwd]

    # Then compute the symmetric backward starting conditions
    ic_bwd = symiirorder2_ic_bwd(input, r, omega, precision)

    # Apply the system cs / (1 - a2 * z^1 - a3 * z^2)
    # Compute the initial conditions in the form expected by sosfilt
    zi = np.matmul(coef, ic_bwd[:, :, None])[:, :, 0]
    # Run the section over the reversed forward output, then restore order
    # and append the (reversed) backward seeds.
    y, _ = sosfilt(sos, axis_slice(y_fwd, -3, step=-1), zi=zi[None])
    out = np.c_[axis_reverse(y), axis_reverse(ic_bwd)]

    if squeeze_dim:
        out = out[0]

    return out
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_upfirdn.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Code adapted from "upfirdn" python library with permission:
2
+ #
3
+ # Copyright (c) 2009, Motorola, Inc
4
+ #
5
+ # All Rights Reserved.
6
+ #
7
+ # Redistribution and use in source and binary forms, with or without
8
+ # modification, are permitted provided that the following conditions are
9
+ # met:
10
+ #
11
+ # * Redistributions of source code must retain the above copyright notice,
12
+ # this list of conditions and the following disclaimer.
13
+ #
14
+ # * Redistributions in binary form must reproduce the above copyright
15
+ # notice, this list of conditions and the following disclaimer in the
16
+ # documentation and/or other materials provided with the distribution.
17
+ #
18
+ # * Neither the name of Motorola nor the names of its contributors may be
19
+ # used to endorse or promote products derived from this software without
20
+ # specific prior written permission.
21
+ #
22
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23
+ # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24
+ # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25
+ # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26
+ # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27
+ # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28
+ # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29
+ # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30
+ # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33
+
34
+ import numpy as np
35
+
36
+ from ._upfirdn_apply import _output_len, _apply, mode_enum
37
+
38
+ __all__ = ['upfirdn', '_output_len']
39
+
40
+ _upfirdn_modes = [
41
+ 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect',
42
+ 'antisymmetric', 'antireflect', 'line',
43
+ ]
44
+
45
+
46
+ def _pad_h(h, up):
47
+ """Store coefficients in a transposed, flipped arrangement.
48
+
49
+ For example, suppose upRate is 3, and the
50
+ input number of coefficients is 10, represented as h[0], ..., h[9].
51
+
52
+ Then the internal buffer will look like this::
53
+
54
+ h[9], h[6], h[3], h[0], // flipped phase 0 coefs
55
+ 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded)
56
+ 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded)
57
+
58
+ """
59
+ h_padlen = len(h) + (-len(h) % up)
60
+ h_full = np.zeros(h_padlen, h.dtype)
61
+ h_full[:len(h)] = h
62
+ h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()
63
+ return h_full
64
+
65
+
66
def _check_mode(mode):
    """Translate a (case-insensitive) extension-mode name to its C enum."""
    return mode_enum(mode.lower())
70
+
71
+
72
class _UpFIRDn:
    """Helper for resampling.

    Precomputes the polyphase-decomposed, flipped filter taps for a given
    (filter, input dtype, up, down) combination so they can be reused
    across multiple `apply_filter` calls.
    """

    def __init__(self, h, x_dtype, up, down):
        # h: 1-D FIR filter taps; validated and cast to the common output type.
        h = np.asarray(h)
        if h.ndim != 1 or h.size == 0:
            raise ValueError('h must be 1-D with non-zero length')
        # Output type is at least float32, promoted by filter and input dtypes.
        self._output_type = np.result_type(h.dtype, x_dtype, np.float32)
        h = np.asarray(h, self._output_type)
        self._up = int(up)
        self._down = int(down)
        if self._up < 1 or self._down < 1:
            raise ValueError('Both up and down must be >= 1')
        # This both transposes, and "flips" each phase for filtering
        self._h_trans_flip = _pad_h(h, self._up)
        self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip)
        # Original (pre-padding) tap count, needed for output-length math.
        self._h_len_orig = len(h)

    def apply_filter(self, x, axis=-1, mode='constant', cval=0):
        """Apply the prepared filter to the specified axis of N-D signal x."""
        output_len = _output_len(self._h_len_orig, x.shape[axis],
                                 self._up, self._down)
        # Explicit use of np.int64 for output_shape dtype avoids OverflowError
        # when allocating large array on platforms where intp is 32 bits.
        output_shape = np.asarray(x.shape, dtype=np.int64)
        output_shape[axis] = output_len
        out = np.zeros(output_shape, dtype=self._output_type, order='C')
        # Normalize a negative axis before handing off to the C routine.
        axis = axis % x.ndim
        mode = _check_mode(mode)
        # _apply fills `out` in place (C implementation).
        _apply(np.asarray(x, self._output_type),
               self._h_trans_flip, out,
               self._up, self._down, axis, mode, cval)
        return out
105
+
106
+
107
def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):
    """Upsample, FIR filter, and downsample.

    Parameters
    ----------
    h : array_like
        1-D FIR (finite-impulse response) filter coefficients.
    x : array_like
        Input signal array.
    up : int, optional
        Upsampling rate. Default is 1.
    down : int, optional
        Downsampling rate. Default is 1.
    axis : int, optional
        The axis of the input data array along which to apply the linear
        filter; each subarray along this axis is filtered. Default is -1.
    mode : str, optional
        Signal extension mode. ``{"constant", "symmetric", "reflect",
        "edge", "wrap"}`` match the `numpy.pad` modes; ``"smooth"`` extends
        using the slope of the last two points at each end;
        ``"antireflect"`` and ``"antisymmetric"`` are anti-symmetric
        variants of ``"reflect"`` and ``"symmetric"``; ``"line"`` extends
        along the linear trend defined by the first and last points.

        .. versionadded:: 1.4.0
    cval : float, optional
        The constant value to use when ``mode == "constant"``.

        .. versionadded:: 1.4.0

    Returns
    -------
    y : ndarray
        The output signal array, shaped like `x` except along `axis`,
        whose length is determined by `h`, `up`, and `down`.

    Notes
    -----
    This implements the polyphase block diagram on page 129 of the
    Vaidyanathan text [1]_ (Figure 4.3-8d), which is O(N/P) per output
    sample instead of the O(N*Q) direct approach.

    .. versionadded:: 0.18

    References
    ----------
    .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,
           Prentice Hall, 1993.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import upfirdn
    >>> upfirdn([1, 1, 1], [1, 1, 1])   # FIR filter
    array([ 1.,  2.,  3.,  2.,  1.])
    >>> upfirdn([1], [1, 2, 3], 3)      # upsampling with zeros insertion
    array([ 1.,  0.,  0.,  2.,  0.,  0.,  3.])
    >>> upfirdn([1], np.arange(10), 1, 3)  # decimation by 3
    array([ 0.,  3.,  6.,  9.])
    >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3)  # linear interp, rate 2/3
    array([ 0. ,  1. ,  2.5,  4. ,  5.5,  7. ,  8.5])
    """
    x = np.asarray(x)
    # Equivalent to (but faster than) using np.apply_along_axis.
    resampler = _UpFIRDn(h, x.dtype, up, down)
    return resampler.apply_filter(x, axis, mode, cval)
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_waveforms.py ADDED
@@ -0,0 +1,696 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Travis Oliphant
2
+ # 2003
3
+ #
4
+ # Feb. 2010: Updated by Warren Weckesser:
5
+ # Rewrote much of chirp()
6
+ # Added sweep_poly()
7
+ import numpy as np
8
+ from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
9
+ exp, cos, sin, polyval, polyint
10
+
11
+
12
+ __all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
13
+ 'unit_impulse']
14
+
15
+
16
+ def sawtooth(t, width=1):
17
+ """
18
+ Return a periodic sawtooth or triangle waveform.
19
+
20
+ The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
21
+ interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
22
+ ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
23
+
24
+ Note that this is not band-limited. It produces an infinite number
25
+ of harmonics, which are aliased back and forth across the frequency
26
+ spectrum.
27
+
28
+ Parameters
29
+ ----------
30
+ t : array_like
31
+ Time.
32
+ width : array_like, optional
33
+ Width of the rising ramp as a proportion of the total cycle.
34
+ Default is 1, producing a rising ramp, while 0 produces a falling
35
+ ramp. `width` = 0.5 produces a triangle wave.
36
+ If an array, causes wave shape to change over time, and must be the
37
+ same length as t.
38
+
39
+ Returns
40
+ -------
41
+ y : ndarray
42
+ Output array containing the sawtooth waveform.
43
+
44
+ Examples
45
+ --------
46
+ A 5 Hz waveform sampled at 500 Hz for 1 second:
47
+
48
+ >>> import numpy as np
49
+ >>> from scipy import signal
50
+ >>> import matplotlib.pyplot as plt
51
+ >>> t = np.linspace(0, 1, 500)
52
+ >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
53
+
54
+ """
55
+ t, w = asarray(t), asarray(width)
56
+ w = asarray(w + (t - t))
57
+ t = asarray(t + (w - w))
58
+ if t.dtype.char in ['fFdD']:
59
+ ytype = t.dtype.char
60
+ else:
61
+ ytype = 'd'
62
+ y = zeros(t.shape, ytype)
63
+
64
+ # width must be between 0 and 1 inclusive
65
+ mask1 = (w > 1) | (w < 0)
66
+ place(y, mask1, nan)
67
+
68
+ # take t modulo 2*pi
69
+ tmod = mod(t, 2 * pi)
70
+
71
+ # on the interval 0 to width*2*pi function is
72
+ # tmod / (pi*w) - 1
73
+ mask2 = (1 - mask1) & (tmod < w * 2 * pi)
74
+ tsub = extract(mask2, tmod)
75
+ wsub = extract(mask2, w)
76
+ place(y, mask2, tsub / (pi * wsub) - 1)
77
+
78
+ # on the interval width*2*pi to 2*pi function is
79
+ # (pi*(w+1)-tmod) / (pi*(1-w))
80
+
81
+ mask3 = (1 - mask1) & (1 - mask2)
82
+ tsub = extract(mask3, tmod)
83
+ wsub = extract(mask3, w)
84
+ place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
85
+ return y
86
+
87
+
88
+ def square(t, duty=0.5):
89
+ """
90
+ Return a periodic square-wave waveform.
91
+
92
+ The square wave has a period ``2*pi``, has value +1 from 0 to
93
+ ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
94
+ the interval [0,1].
95
+
96
+ Note that this is not band-limited. It produces an infinite number
97
+ of harmonics, which are aliased back and forth across the frequency
98
+ spectrum.
99
+
100
+ Parameters
101
+ ----------
102
+ t : array_like
103
+ The input time array.
104
+ duty : array_like, optional
105
+ Duty cycle. Default is 0.5 (50% duty cycle).
106
+ If an array, causes wave shape to change over time, and must be the
107
+ same length as t.
108
+
109
+ Returns
110
+ -------
111
+ y : ndarray
112
+ Output array containing the square waveform.
113
+
114
+ Examples
115
+ --------
116
+ A 5 Hz waveform sampled at 500 Hz for 1 second:
117
+
118
+ >>> import numpy as np
119
+ >>> from scipy import signal
120
+ >>> import matplotlib.pyplot as plt
121
+ >>> t = np.linspace(0, 1, 500, endpoint=False)
122
+ >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
123
+ >>> plt.ylim(-2, 2)
124
+
125
+ A pulse-width modulated sine wave:
126
+
127
+ >>> plt.figure()
128
+ >>> sig = np.sin(2 * np.pi * t)
129
+ >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
130
+ >>> plt.subplot(2, 1, 1)
131
+ >>> plt.plot(t, sig)
132
+ >>> plt.subplot(2, 1, 2)
133
+ >>> plt.plot(t, pwm)
134
+ >>> plt.ylim(-1.5, 1.5)
135
+
136
+ """
137
+ t, w = asarray(t), asarray(duty)
138
+ w = asarray(w + (t - t))
139
+ t = asarray(t + (w - w))
140
+ if t.dtype.char in ['fFdD']:
141
+ ytype = t.dtype.char
142
+ else:
143
+ ytype = 'd'
144
+
145
+ y = zeros(t.shape, ytype)
146
+
147
+ # width must be between 0 and 1 inclusive
148
+ mask1 = (w > 1) | (w < 0)
149
+ place(y, mask1, nan)
150
+
151
+ # on the interval 0 to duty*2*pi function is 1
152
+ tmod = mod(t, 2 * pi)
153
+ mask2 = (1 - mask1) & (tmod < w * 2 * pi)
154
+ place(y, mask2, 1)
155
+
156
+ # on the interval duty*2*pi to 2*pi function is
157
+ # (pi*(w+1)-tmod) / (pi*(1-w))
158
+ mask3 = (1 - mask1) & (1 - mask2)
159
+ place(y, mask3, -1)
160
+ return y
161
+
162
+
163
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
               retenv=False):
    """
    Return a Gaussian modulated sinusoid:

    ``exp(-a t^2) exp(1j*2*pi*fc*t).``

    If `retquad` is True, the real and imaginary (in-phase and quadrature)
    parts are returned.  If `retenv` is True, the unmodulated envelope is
    also returned.  Otherwise only the real part of the modulated sinusoid
    is returned.

    Parameters
    ----------
    t : ndarray or the string 'cutoff'
        Input array, or the string ``'cutoff'`` to request the cutoff time.
    fc : float, optional
        Center frequency (e.g. Hz). Default is 1000. Must be >= 0.
    bw : float, optional
        Fractional bandwidth in frequency domain of pulse (e.g. Hz).
        Default is 0.5. Must be > 0.
    bwr : float, optional
        Reference level at which fractional bandwidth is calculated (dB).
        Default is -6. Must be < 0.
    tpr : float, optional
        If `t` is 'cutoff', return the cutoff time at which the pulse
        amplitude falls below `tpr` (in dB). Default is -60. Must be < 0.
    retquad : bool, optional
        If True, also return the quadrature (imaginary) part of the signal.
        Default is False.
    retenv : bool, optional
        If True, also return the envelope of the signal. Default is False.

    Returns
    -------
    yI : ndarray
        Real part of signal. Always returned.
    yQ : ndarray
        Imaginary part of signal. Only returned if `retquad` is True.
    yenv : ndarray
        Envelope of signal. Only returned if `retenv` is True.

    Examples
    --------
    Plot real component, imaginary component, and envelope for a 5 Hz
    pulse, sampled at 100 Hz for 2 seconds:

    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
    >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
    >>> plt.plot(t, i, t, q, t, e, '--')

    """
    if fc < 0:
        raise ValueError(f"Center frequency (fc={fc:.2f}) must be >=0.")
    if bw <= 0:
        raise ValueError(f"Fractional bandwidth (bw={bw:.2f}) must be > 0.")
    if bwr >= 0:
        raise ValueError(f"Reference level for bandwidth (bwr={bwr:.2f}) "
                         "must be < 0 dB")

    # The envelope exp(-a t^2) has spectrum sqrt(pi/a) exp(-pi^2/a * f^2).
    # Solving g(fc*bw/2) = ref for the decay constant a gives:
    #   a = -(pi*fc*bw)^2 / (4*log(ref))
    ref = pow(10.0, bwr / 20.0)
    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))

    if isinstance(t, str):
        if t != 'cutoff':
            raise ValueError("If `t` is a string, it must be 'cutoff'")
        # Cutoff time: solve exp(-a tc**2) = tref where tref = 10^(tpr/20).
        if tpr >= 0:
            raise ValueError("Reference level for time cutoff must "
                             "be < 0 dB")
        tref = pow(10.0, tpr / 20.0)
        return sqrt(-log(tref) / a)

    yenv = exp(-a * t * t)
    yI = yenv * cos(2 * pi * fc * t)
    yQ = yenv * sin(2 * pi * fc * t)
    if retquad:
        return (yI, yQ, yenv) if retenv else (yI, yQ)
    return (yI, yenv) if retenv else yI
258
+
259
+
260
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True, *,
          complex=False):
    r"""Frequency-swept cosine generator.

    In the following, 'Hz' should be interpreted as 'cycles per unit';
    there is no requirement that the unit be one second. The important
    distinction is that the units of rotation are cycles, not radians.
    Likewise, `t` could be a measurement of space instead of time.

    Parameters
    ----------
    t : array_like
        Times at which to evaluate the waveform.
    f0 : float
        Frequency (e.g. Hz) at time t=0.
    t1 : float
        Time at which `f1` is specified.
    f1 : float
        Frequency (e.g. Hz) of the waveform at time `t1`.
    method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
        Kind of frequency sweep. If not given, `linear` is assumed.
        Abbreviations are accepted (see Notes).
    phi : float, optional
        Phase offset, in degrees. Default is 0.
    vertex_zero : bool, optional
        Only used when `method` is 'quadratic'. Determines whether the
        vertex of the parabolic frequency graph is at t=0 or t=t1.
    complex : bool, optional
        If True, return a complex-valued analytic signal instead of a
        real-valued one, allowing the use of complex baseband.
        Default is False.

        .. versionadded:: 1.15.0

    Returns
    -------
    y : ndarray
        The signal evaluated at `t` with the requested time-varying
        frequency; ``exp(1j*phase + 1j*(pi/180)*phi)`` if `complex`, else
        ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
        (from 0 to `t`) of ``2*pi*f(t)``.

    See Also
    --------
    sweep_poly

    Notes
    -----
    The instantaneous frequency :math:`f(t)` for each `method` (long form
    with accepted abbreviations) is:

    1. ``'linear'``/``'lin'``/``'li'``:
       :math:`f(t) = f_0 + (f_1 - f_0) t / t_1`.

    2. ``'quadratic'``/``'quad'``/``'q'``: a parabola through
       :math:`(0, f_0)` and :math:`(t_1, f_1)` with vertex at
       :math:`(0, f_0)` (default) or :math:`(t_1, f_1)` when
       `vertex_zero` is False. For a more general polynomial sweep use
       `scipy.signal.sweep_poly`.

    3. ``'logarithmic'``/``'log'``/``'lo'``:
       :math:`f(t) = f_0 (f_1/f_0)^{t/t_1}`; :math:`f_0` and :math:`f_1`
       must be nonzero with the same sign (geometric/exponential chirp).

    4. ``'hyperbolic'``/``'hyp'``:
       :math:`f(t) = f_0 f_1 t_1 / ((f_0 - f_1) t + f_1 t_1)`;
       :math:`f_0` and :math:`f_1` must be nonzero.

    Examples
    --------
    A linear chirp from 6 Hz to 1 Hz over 10 seconds:

    >>> import numpy as np
    >>> from scipy.signal import chirp
    >>> import matplotlib.pyplot as plt
    >>> t = np.arange(1000) * 0.01
    >>> x_lin = chirp(t, f0=6, f1=1, t1=10, method='linear')
    >>> plt.plot(t, x_lin)
    >>> plt.show()

    Note that negative frequencies only make sense with complex-valued
    signals, and that the magnitude of the complex exponential is one
    whereas the magnitude of the real-valued cosine is only 1/2.
    """
    # The phase is computed in _chirp_phase, to make testing easier.
    total_phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
    total_phase = total_phase + np.deg2rad(phi)
    if complex:
        return np.exp(1j * total_phase)
    return np.cos(total_phase)
428
+
429
+
430
+ def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
431
+ """
432
+ Calculate the phase used by `chirp` to generate its output.
433
+
434
+ See `chirp` for a description of the arguments.
435
+
436
+ """
437
+ t = asarray(t)
438
+ f0 = float(f0)
439
+ t1 = float(t1)
440
+ f1 = float(f1)
441
+ if method in ['linear', 'lin', 'li']:
442
+ beta = (f1 - f0) / t1
443
+ phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
444
+
445
+ elif method in ['quadratic', 'quad', 'q']:
446
+ beta = (f1 - f0) / (t1 ** 2)
447
+ if vertex_zero:
448
+ phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
449
+ else:
450
+ phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
451
+
452
+ elif method in ['logarithmic', 'log', 'lo']:
453
+ if f0 * f1 <= 0.0:
454
+ raise ValueError("For a logarithmic chirp, f0 and f1 must be "
455
+ "nonzero and have the same sign.")
456
+ if f0 == f1:
457
+ phase = 2 * pi * f0 * t
458
+ else:
459
+ beta = t1 / log(f1 / f0)
460
+ phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
461
+
462
+ elif method in ['hyperbolic', 'hyp']:
463
+ if f0 == 0 or f1 == 0:
464
+ raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
465
+ "nonzero.")
466
+ if f0 == f1:
467
+ # Degenerate case: constant frequency.
468
+ phase = 2 * pi * f0 * t
469
+ else:
470
+ # Singular point: the instantaneous frequency blows up
471
+ # when t == sing.
472
+ sing = -f1 * t1 / (f0 - f1)
473
+ phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
474
+
475
+ else:
476
+ raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', "
477
+ f"or 'hyperbolic', but a value of {method!r} was given.")
478
+
479
+ return phase
480
+
481
+
482
def sweep_poly(t, poly, phi=0):
    """
    Frequency-swept cosine generator, with a time-dependent frequency.

    This function generates a sinusoidal function whose instantaneous
    frequency varies with time.  The frequency at time `t` is given by
    the polynomial `poly`.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    poly : 1-D array_like or instance of numpy.poly1d
        The desired frequency expressed as a polynomial. If `poly` is
        a list or ndarray of length n, the instantaneous frequency is

        ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``

        If `poly` is an instance of numpy.poly1d, the instantaneous
        frequency is ``f(t) = poly(t)``.
    phi : float, optional
        Phase offset, in degrees. Default: 0.

    Returns
    -------
    sweep_poly : ndarray
        The signal evaluated at `t` with the requested time-varying
        frequency; precisely, ``cos(phase + (pi/180)*phi)``, where
        `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``.

    See Also
    --------
    chirp

    Notes
    -----
    .. versionadded:: 0.8.0

    Examples
    --------
    Compute the waveform with instantaneous frequency::

        f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2

    over the interval 0 <= t <= 10.

    >>> import numpy as np
    >>> from scipy.signal import sweep_poly
    >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    >>> t = np.linspace(0, 10, 5001)
    >>> w = sweep_poly(t, p)

    """
    # The phase is computed in _sweep_poly_phase, to make testing easier.
    phase = _sweep_poly_phase(t, poly)
    # Convert the phase offset from degrees to radians.
    return cos(phase + phi * (pi / 180))
578
+
579
+
580
+ def _sweep_poly_phase(t, poly):
581
+ """
582
+ Calculate the phase used by sweep_poly to generate its output.
583
+
584
+ See `sweep_poly` for a description of the arguments.
585
+
586
+ """
587
+ # polyint handles lists, ndarrays and instances of poly1d automatically.
588
+ intpoly = polyint(poly)
589
+ phase = 2 * pi * polyval(intpoly, t)
590
+ return phase
591
+
592
+
593
def unit_impulse(shape, idx=None, dtype=float):
    r"""
    Unit impulse signal (discrete delta function) or unit basis vector.

    Parameters
    ----------
    shape : int or tuple of int
        Number of samples in the output (1-D), or a tuple giving the
        shape of the output (N-D).
    idx : None or int or tuple of int or 'mid', optional
        Index at which the value is 1. If None, defaults to the 0th
        element. If ``idx='mid'``, the impulse is centered at
        ``shape // 2`` in all dimensions. If an int, the impulse is at
        `idx` in all dimensions.
    dtype : data-type, optional
        The desired data-type for the array, e.g., ``numpy.int8``.
        Default is ``numpy.float64``.

    Returns
    -------
    y : ndarray
        Output array containing an impulse signal.

    Notes
    -----
    In digital signal processing literature the unit impulse signal is
    often represented by the Kronecker delta [1]_: a signal
    :math:`u_k[n] = \delta[n-k]` that is zero everywhere except at the
    :math:`k`-th sample. It is frequently interpreted as the
    discrete-time version of the continuous-time Dirac distribution [2]_.

    References
    ----------
    .. [1] "Kronecker delta", *Wikipedia*,
           https://en.wikipedia.org/wiki/Kronecker_delta#Digital_signal_processing
    .. [2] "Dirac delta function" *Wikipedia*,
           https://en.wikipedia.org/wiki/Dirac_delta_function#Relationship_to_the_Kronecker_delta

    .. versionadded:: 0.19.0

    Examples
    --------
    An impulse at the 0th element (:math:`\delta[n]`):

    >>> from scipy import signal
    >>> signal.unit_impulse(8)
    array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])

    Impulse offset by 2 samples (:math:`\delta[n-2]`):

    >>> signal.unit_impulse(7, 2)
    array([ 0.,  0.,  1.,  0.,  0.,  0.,  0.])

    2-dimensional impulse, centered:

    >>> signal.unit_impulse((3, 3), 'mid')
    array([[ 0.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  0.]])

    """
    out = zeros(shape, dtype)

    ndim = len(np.atleast_1d(shape))
    if idx is None:
        # Default: impulse at the origin in every dimension.
        pos = (0,) * ndim
    elif idx == 'mid':
        # Center of the array along every axis.
        pos = tuple(np.atleast_1d(shape) // 2)
    elif not hasattr(idx, "__iter__"):
        # Scalar index broadcast to all dimensions.
        pos = (idx,) * ndim
    else:
        pos = idx

    out[pos] = 1
    return out
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_wavelets.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.signal import convolve
3
+
4
+
5
+ def _ricker(points, a):
6
+ A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
7
+ wsq = a**2
8
+ vec = np.arange(0, points) - (points - 1.0) / 2
9
+ xsq = vec**2
10
+ mod = (1 - xsq / wsq)
11
+ gauss = np.exp(-xsq / (2 * wsq))
12
+ total = A * mod * gauss
13
+ return total
14
+
15
+
16
+ def _cwt(data, wavelet, widths, dtype=None, **kwargs):
17
+ # Determine output type
18
+ if dtype is None:
19
+ if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':
20
+ dtype = np.complex128
21
+ else:
22
+ dtype = np.float64
23
+
24
+ output = np.empty((len(widths), len(data)), dtype=dtype)
25
+ for ind, width in enumerate(widths):
26
+ N = np.min([10 * width, len(data)])
27
+ wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])
28
+ output[ind] = convolve(data, wavelet_data, mode='same')
29
+ return output
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/bsplines.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.signal` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
# Everything below is deprecated: attribute look-ups are redirected (with a
# DeprecationWarning) to scipy.signal._spline_filters until SciPy v2.0.0.
__all__ = [  # noqa: F822
    'spline_filter', 'gauss_spline',
    'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval',
    'cspline2d', 'sepfir2d'
]


def __dir__():
    # Advertise only the deprecated public names to dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Module-level attribute hook (PEP 562): warn about the deprecation and
    # forward the lookup to the private implementation module.
    return _sub_module_deprecation(sub_package="signal", module="bsplines",
                                   private_modules=["_spline_filters"], all=__all__,
                                   attribute=name)
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/filter_design.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Public names historically exposed by ``scipy.signal.filter_design``.
__all__ = [  # noqa: F822
    'findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
    'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
    'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
    'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
    'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
    'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
    'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
    'sosfreqz', 'freqz_sos', 'iirnotch', 'iirpeak', 'bilinear_zpk',
    'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk',
    'gammatone', 'iircomb',
]


def __dir__():
    """Advertise only the deprecated public names."""
    return __all__


def __getattr__(name):
    """Resolve `name` through the deprecation shim, warning on access."""
    return _sub_module_deprecation(
        sub_package="signal",
        module="filter_design",
        private_modules=["_filter_design"],
        all=__all__,
        attribute=name,
    )
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Public names historically exposed by ``scipy.signal.fir_filter_design``.
__all__ = [  # noqa: F822
    'kaiser_beta',
    'kaiser_atten',
    'kaiserord',
    'firwin',
    'firwin2',
    'remez',
    'firls',
    'minimum_phase',
]


def __dir__():
    """Advertise only the deprecated public names."""
    return __all__


def __getattr__(name):
    """Resolve `name` through the deprecation shim, warning on access."""
    return _sub_module_deprecation(
        sub_package="signal",
        module="fir_filter_design",
        private_modules=["_fir_filter_design"],
        all=__all__,
        attribute=name,
    )
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/lti_conversion.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Public names historically exposed by ``scipy.signal.lti_conversion``.
__all__ = [  # noqa: F822
    'tf2ss',
    'abcd_normalize',
    'ss2tf',
    'zpk2ss',
    'ss2zpk',
    'cont2discrete',
    'tf2zpk',
    'zpk2tf',
    'normalize',
]


def __dir__():
    """Advertise only the deprecated public names."""
    return __all__


def __getattr__(name):
    """Resolve `name` through the deprecation shim, warning on access."""
    return _sub_module_deprecation(
        sub_package="signal",
        module="lti_conversion",
        private_modules=["_lti_conversion"],
        all=__all__,
        attribute=name,
    )
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/ltisys.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.signal` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
9
+ 'lsim', 'impulse', 'step', 'bode',
10
+ 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
11
+ 'dfreqresp', 'dbode',
12
+ 'tf2zpk', 'zpk2tf', 'normalize', 'freqs',
13
+ 'freqz', 'freqs_zpk', 'freqz_zpk', 'tf2ss', 'abcd_normalize',
14
+ 'ss2tf', 'zpk2ss', 'ss2zpk', 'cont2discrete',
15
+ ]
16
+
17
+
18
+ def __dir__():
19
+ return __all__
20
+
21
+
22
+ def __getattr__(name):
23
+ return _sub_module_deprecation(sub_package="signal", module="ltisys",
24
+ private_modules=["_ltisys"], all=__all__,
25
+ attribute=name)
infer_4_30_0/lib/python3.10/site-packages/scipy/signal/signaltools.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.signal` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'correlate', 'correlation_lags', 'correlate2d',
9
+ 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',
10
+ 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
11
+ 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
12
+ 'unique_roots', 'invres', 'invresz', 'residue',
13
+ 'residuez', 'resample', 'resample_poly', 'detrend',
14
+ 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
15
+ 'filtfilt', 'decimate', 'vectorstrength',
16
+ 'dlti', 'upfirdn', 'get_window', 'cheby1', 'firwin'
17
+ ]
18
+
19
+
20
+ def __dir__():
21
+ return __all__
22
+
23
+
24
+ def __getattr__(name):
25
+ return _sub_module_deprecation(sub_package="signal", module="signaltools",
26
+ private_modules=["_signaltools"], all=__all__,
27
+ attribute=name)