reach-vb HF staff committed on
Commit
d2e7957
1 Parent(s): f14e74e

8274b78f238537f55a2fdac5624687534650241ed111eeb7b3bab8b2371bbf9b

Browse files
Files changed (50) hide show
  1. lib/python3.11/site-packages/mpmath/__pycache__/identification.cpython-311.pyc +0 -0
  2. lib/python3.11/site-packages/mpmath/__pycache__/math2.cpython-311.pyc +0 -0
  3. lib/python3.11/site-packages/mpmath/__pycache__/rational.cpython-311.pyc +0 -0
  4. lib/python3.11/site-packages/mpmath/__pycache__/usertools.cpython-311.pyc +0 -0
  5. lib/python3.11/site-packages/mpmath/__pycache__/visualization.cpython-311.pyc +0 -0
  6. lib/python3.11/site-packages/mpmath/calculus/__init__.py +6 -0
  7. lib/python3.11/site-packages/mpmath/calculus/__pycache__/__init__.cpython-311.pyc +0 -0
  8. lib/python3.11/site-packages/mpmath/calculus/__pycache__/approximation.cpython-311.pyc +0 -0
  9. lib/python3.11/site-packages/mpmath/calculus/__pycache__/calculus.cpython-311.pyc +0 -0
  10. lib/python3.11/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-311.pyc +0 -0
  11. lib/python3.11/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-311.pyc +0 -0
  12. lib/python3.11/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-311.pyc +0 -0
  13. lib/python3.11/site-packages/mpmath/calculus/__pycache__/odes.cpython-311.pyc +0 -0
  14. lib/python3.11/site-packages/mpmath/calculus/__pycache__/optimization.cpython-311.pyc +0 -0
  15. lib/python3.11/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-311.pyc +0 -0
  16. lib/python3.11/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-311.pyc +0 -0
  17. lib/python3.11/site-packages/mpmath/calculus/approximation.py +246 -0
  18. lib/python3.11/site-packages/mpmath/calculus/calculus.py +6 -0
  19. lib/python3.11/site-packages/mpmath/calculus/differentiation.py +647 -0
  20. lib/python3.11/site-packages/mpmath/calculus/extrapolation.py +2115 -0
  21. lib/python3.11/site-packages/mpmath/calculus/inverselaplace.py +973 -0
  22. lib/python3.11/site-packages/mpmath/calculus/odes.py +288 -0
  23. lib/python3.11/site-packages/mpmath/calculus/optimization.py +1102 -0
  24. lib/python3.11/site-packages/mpmath/calculus/polynomials.py +213 -0
  25. lib/python3.11/site-packages/mpmath/calculus/quadrature.py +1115 -0
  26. lib/python3.11/site-packages/mpmath/ctx_base.py +494 -0
  27. lib/python3.11/site-packages/mpmath/ctx_fp.py +253 -0
  28. lib/python3.11/site-packages/mpmath/ctx_iv.py +551 -0
  29. lib/python3.11/site-packages/mpmath/ctx_mp.py +1339 -0
  30. lib/python3.11/site-packages/mpmath/ctx_mp_python.py +1149 -0
  31. lib/python3.11/site-packages/mpmath/function_docs.py +0 -0
  32. lib/python3.11/site-packages/mpmath/functions/__init__.py +14 -0
  33. lib/python3.11/site-packages/mpmath/functions/__pycache__/__init__.cpython-311.pyc +0 -0
  34. lib/python3.11/site-packages/mpmath/functions/__pycache__/bessel.cpython-311.pyc +0 -0
  35. lib/python3.11/site-packages/mpmath/functions/__pycache__/elliptic.cpython-311.pyc +0 -0
  36. lib/python3.11/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-311.pyc +0 -0
  37. lib/python3.11/site-packages/mpmath/functions/__pycache__/factorials.cpython-311.pyc +0 -0
  38. lib/python3.11/site-packages/mpmath/functions/__pycache__/functions.cpython-311.pyc +0 -0
  39. lib/python3.11/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-311.pyc +0 -0
  40. lib/python3.11/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-311.pyc +0 -0
  41. lib/python3.11/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-311.pyc +0 -0
  42. lib/python3.11/site-packages/mpmath/functions/__pycache__/rszeta.cpython-311.pyc +0 -0
  43. lib/python3.11/site-packages/mpmath/functions/__pycache__/signals.cpython-311.pyc +0 -0
  44. lib/python3.11/site-packages/mpmath/functions/__pycache__/theta.cpython-311.pyc +0 -0
  45. lib/python3.11/site-packages/mpmath/functions/__pycache__/zeta.cpython-311.pyc +0 -0
  46. lib/python3.11/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-311.pyc +0 -0
  47. lib/python3.11/site-packages/mpmath/functions/bessel.py +1108 -0
  48. lib/python3.11/site-packages/mpmath/functions/elliptic.py +1431 -0
  49. lib/python3.11/site-packages/mpmath/functions/expintegrals.py +425 -0
  50. lib/python3.11/site-packages/mpmath/functions/factorials.py +187 -0
lib/python3.11/site-packages/mpmath/__pycache__/identification.cpython-311.pyc ADDED
Binary file (41.3 kB). View file
 
lib/python3.11/site-packages/mpmath/__pycache__/math2.cpython-311.pyc ADDED
Binary file (28.5 kB). View file
 
lib/python3.11/site-packages/mpmath/__pycache__/rational.cpython-311.pyc ADDED
Binary file (10.4 kB). View file
 
lib/python3.11/site-packages/mpmath/__pycache__/usertools.cpython-311.pyc ADDED
Binary file (4.96 kB). View file
 
lib/python3.11/site-packages/mpmath/__pycache__/visualization.cpython-311.pyc ADDED
Binary file (17.2 kB). View file
 
lib/python3.11/site-packages/mpmath/calculus/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from . import calculus
2
+ # XXX: hack to set methods
3
+ from . import approximation
4
+ from . import differentiation
5
+ from . import extrapolation
6
+ from . import polynomials
lib/python3.11/site-packages/mpmath/calculus/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (485 Bytes). View file
 
lib/python3.11/site-packages/mpmath/calculus/__pycache__/approximation.cpython-311.pyc ADDED
Binary file (12.8 kB). View file
 
lib/python3.11/site-packages/mpmath/calculus/__pycache__/calculus.cpython-311.pyc ADDED
Binary file (672 Bytes). View file
 
lib/python3.11/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-311.pyc ADDED
Binary file (28.4 kB). View file
 
lib/python3.11/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-311.pyc ADDED
Binary file (89.6 kB). View file
 
lib/python3.11/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-311.pyc ADDED
Binary file (41.7 kB). View file
 
lib/python3.11/site-packages/mpmath/calculus/__pycache__/odes.cpython-311.pyc ADDED
Binary file (13.3 kB). View file
 
lib/python3.11/site-packages/mpmath/calculus/__pycache__/optimization.cpython-311.pyc ADDED
Binary file (42.8 kB). View file
 
lib/python3.11/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-311.pyc ADDED
Binary file (10.9 kB). View file
 
lib/python3.11/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-311.pyc ADDED
Binary file (50.9 kB). View file
 
lib/python3.11/site-packages/mpmath/calculus/approximation.py ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ #----------------------------------------------------------------------------#
5
+ # Approximation methods #
6
+ #----------------------------------------------------------------------------#
7
+
8
+ # The Chebyshev approximation formula is given at:
9
+ # http://mathworld.wolfram.com/ChebyshevApproximationFormula.html
10
+
11
+ # The only major changes in the following code is that we return the
12
+ # expanded polynomial coefficients instead of Chebyshev coefficients,
13
+ # and that we automatically transform [a,b] -> [-1,1] and back
14
+ # for convenience.
15
+
16
+ # Coefficient in Chebyshev approximation
17
def chebcoeff(ctx, f, a, b, j, N):
    # j-th Chebyshev coefficient of f on [a, b]: sample f at the N
    # Chebyshev nodes of [-1, 1] mapped affinely onto [a, b].
    half = ctx.mpf(0.5)
    total = ctx.mpf(0)
    for k in range(1, N+1):
        node = ctx.cospi((k-half)/N)
        total += f(node*(b-a)*half + (b+a)*half) * ctx.cospi(j*(k-half)/N)
    return 2*total/N
24
+
25
+ # Generate Chebyshev polynomials T_n(ax+b) in expanded form
26
def chebT(ctx, a=1, b=0):
    """Generate expanded coefficient lists of T_n(a*x + b) for n = 0, 1, 2, ...

    Uses the recurrence T[n+1](y) = 2*y*T[n](y) - T[n-1](y) with y = a*x + b.
    """
    prev = [1]            # T_0 = 1
    yield prev
    cur = [b, a]          # T_1(a*x + b) = a*x + b
    while True:
        yield cur
        # Multiply by 2*(a*x + b): the a*x part shifts degrees up by one.
        nxt = [0] + [2*a*c for c in cur]
        for i, c in enumerate(cur):
            nxt[i] += 2*b*c
        # Subtract the previous polynomial.
        for i, c in enumerate(prev):
            nxt[i] -= c
        cur, prev = nxt, cur
37
+
38
+ @defun
39
def chebyfit(ctx, f, interval, N, error=False):
    r"""
    Compute the coefficients (in descending degree order) of a polynomial
    of degree `N-1` approximating ``f`` on ``interval = [a, b]`` using the
    Chebyshev approximation formula. With ``error=True``, also return an
    estimate of the maximum absolute error `\max |f(x) - P(x)|` on `[a, b]`.

    The result is nearly minimax-optimal for smooth functions, which makes
    it useful for replacing repeated evaluation of an expensive function::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
        >>> nprint(poly)
        [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
        >>> nprint(err, 12)
        1.61351758081e-5
        >>> nprint(polyval(poly, 1.6), 12)
        -0.0291858904138

    As a rule of thumb an `N`-term fit gives about `N/(b-a)` good decimal
    places on a unit interval; ``chebyfit`` evaluates `f` about `N^2/2`
    times for the coefficients plus `N` times for the error estimate.
    The expanded polynomial can be ill-conditioned, so evaluate it with
    adequate working precision. Chebyshev fitting works poorly for
    non-smooth functions; split the interval at singular points or factor
    out a weight function in that case.
    """
    a, b = ctx._as_points(interval)
    saved_prec = ctx.prec
    try:
        # Guard digits: the coefficient sums lose roughly sqrt(N) digits.
        ctx.prec = saved_prec + int(N**0.5) + 20
        coeffs = [chebcoeff(ctx, f, a, b, j, N) for j in range(N)]
        poly = [ctx.zero] * N
        # The constant Chebyshev coefficient enters with weight 1/2.
        poly[0] = -coeffs[0]/2
        half = ctx.mpf(0.5)
        # Chebyshev polynomials composed with the map sending [a,b] -> [-1,1].
        basis = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
        for j, Tj in zip(range(N), basis):
            for i in range(len(Tj)):
                poly[i] += coeffs[j]*Tj[i]
        poly = poly[::-1]
        # Estimate the maximum error by sampling at Chebyshev points.
        err = ctx.zero
        for j in range(N):
            t = ctx.cos(ctx.pi*j/N) * (b-a)*half + (b+a)*half
            err = max(err, abs(f(t) - ctx.polyval(poly, t)))
    finally:
        ctx.prec = saved_prec
    if error:
        return poly, +err
    return poly
138
+
139
+ @defun
140
def fourier(ctx, f, interval, N):
    r"""
    Compute the degree-`N` Fourier series of ``f`` on ``interval = [a, b]``.

    Returns a pair ``(c, s)`` of coefficient lists (cosine and sine series)
    such that

    .. math ::

        f(x) \sim \sum_{k=0}^N c_k \cos(k m x) + s_k \sin(k m x)

    where `m = 2 \pi / (b-a)`. Note that `c_0` here is half the value many
    texts use; evaluating with :func:`~mpmath.fourierval` gives consistent
    results. Coefficients are computed by numerical quadrature; listing
    interior singular points in ``interval`` improves accuracy and speed::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> c, s = fourier(lambda x: x, [-pi, pi], 5)
        >>> nprint(c)
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        >>> nprint(s)
        [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]
        >>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
        ([0.5], [0.0])
    """
    points = ctx._as_points(interval)
    length = points[-1] - points[0]
    cos_series = []
    sin_series = []
    # Snap coefficients vanishing to working precision to exact zero.
    cutoff = ctx.eps*10
    for k in xrange(N+1):
        w = 2*k*ctx.pi/length
        ck = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(w*t), points)/length
        sk = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(w*t), points)/length
        if k == 0:
            # Constant term carries half weight in this normalization.
            ck /= 2
        if abs(ck) < cutoff: ck = ctx.zero
        if abs(sk) < cutoff: sk = ctx.zero
        cos_series.append(ck)
        sin_series.append(sk)
    return cos_series, sin_series
227
+
228
+ @defun
229
def fourierval(ctx, series, interval, x):
    """
    Evaluates a Fourier series (in the format computed by
    :func:`~mpmath.fourier` for the given interval) at the point `x`.

    The series should be a pair `(c, s)` where `c` is the
    cosine series and `s` is the sine series. The two lists
    need not have the same length.
    """
    cs, ss = series
    ab = ctx._as_points(interval)
    # Fundamental angular frequency of the interval [a, b].
    m = 2*ctx.pi/(ab[-1]-ab[0])
    total = ctx.zero
    # Skip exact-zero coefficients to avoid needless trig evaluations.
    total += ctx.fsum(c*ctx.cos(m*k*x) for k, c in enumerate(cs) if c)
    total += ctx.fsum(s*ctx.sin(m*k*x) for k, s in enumerate(ss) if s)
    return total
lib/python3.11/site-packages/mpmath/calculus/calculus.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
class CalculusMethods(object):
    """Namespace class onto which calculus routines are attached as methods."""

def defun(f):
    """Register ``f`` as a method of :class:`CalculusMethods` and return it unchanged."""
    setattr(CalculusMethods, f.__name__, f)
    return f
lib/python3.11/site-packages/mpmath/calculus/differentiation.py ADDED
@@ -0,0 +1,647 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ try:
5
+ iteritems = dict.iteritems
6
+ except AttributeError:
7
+ iteritems = dict.items
8
+
9
+ #----------------------------------------------------------------------------#
10
+ # Differentiation #
11
+ #----------------------------------------------------------------------------#
12
+
13
+ @defun
14
def difference(ctx, s, n):
    r"""
    Given a sequence `(s_k)` containing at least `n+1` items, returns the
    `n`-th forward difference,

    .. math ::

        \Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k.
    """
    n = int(n)
    total = ctx.zero
    # coef runs through (-1)^(k+n) * binomial(n, k) by an exact
    # integer recurrence, avoiding any factorial evaluations.
    coef = -1 if n & 1 else 1
    for k in xrange(n+1):
        total += coef * s[k]
        coef = (coef*(k-n)) // (k+1)
    return total
30
+
31
def hsteps(ctx, f, x, n, prec, **options):
    """Sample ``f`` at finite-difference nodes around ``x``.

    Returns ``(values, norm, workprec)``: the samples, the effective step
    (h for one-sided differences, 2*h for central ones) and the elevated
    precision the samples were computed at. The caller's precision is
    restored before returning.
    """
    singular = options.get('singular')
    addprec = options.get('addprec', 10)
    direction = options.get('direction', 0)
    # Each difference order amplifies cancellation, so sample at a
    # precision proportional to n+1.
    workprec = (prec + 2*addprec) * (n+1)
    saved = ctx.prec
    try:
        ctx.prec = workprec
        h = options.get('h')
        if h is None:
            # Step relative to |x| if requested, else an absolute step.
            hextramag = int(ctx.mag(x)) if options.get('relative') else 0
            h = ctx.ldexp(1, -prec-addprec-hextramag)
        else:
            h = ctx.convert(h)
        if direction:
            # One-sided: nodes x, x+h, ..., x+n*h with h signed by direction.
            h *= ctx.sign(direction)
            steps = xrange(n+1)
            norm = h
        else:
            # Central: nodes x-n*h, x-(n-2)*h, ..., x+n*h.
            steps = xrange(-n, n+1, 2)
            norm = 2*h
        if singular:
            # Nudge off the evaluation point to dodge a removable singularity.
            x += 0.5*h
        return [f(x+k*h) for k in steps], norm, workprec
    finally:
        ctx.prec = saved
65
+
66
+
67
+ @defun
68
def diff(ctx, f, x, n=1, **options):
    r"""
    Numerically compute the `n`-th derivative `f^{(n)}(x)` (default `n=1`)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> diff(lambda x: x**2 + x, 1.0)
        3.0
        >>> diff(lambda x: x**2 + x, 1.0, 2)
        2.0

    If ``x`` is a tuple of arguments and ``n`` a tuple of orders, the mixed
    partial derivative `f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is computed::

        >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
        2.75

    **Options**

    ``method``
        ``'step'`` (default): finite difference with a small step `h`,
        requiring `n+1` evaluations of `f` at `(n+1)` times the target
        precision. ``'quad'``: numerical quadrature of the Cauchy integral
        formula; more evaluations but little extra precision, which may be
        faster for high orders if `f` is expensive at high precision.
    ``direction``
        -1, 0 (central, default), +1, or any complex number; a nonzero
        value gives a one-sided difference with the step multiplied by
        ``sign(direction)`` (e.g. ``direction=j`` differentiates from the
        positive imaginary direction).
    ``addprec``
        Extra precision for `h` to absorb the function's sensitivity to
        perturbations (default 10).
    ``relative``
        Choose `h` relative to the magnitude of `x` (default False).
    ``h``
        Explicit step size, overriding ``addprec``/``relative``.
    ``singular``
        If True, avoid evaluating exactly at `x` (useful for removable
        singularities; default False).
    ``radius``
        Contour radius for ``method='quad'`` (default 0.25); `f` must be
        free of singularities within that distance of `x`.
    """
    # A sequence of orders requests a mixed partial derivative.
    partial = False
    try:
        orders = list(n)
        x = list(x)
        partial = True
    except TypeError:
        pass
    if partial:
        x = [ctx.convert(c) for c in x]
        return _partial_diff(ctx, f, x, orders, options)
    method = options.get('method', 'step')
    # Order zero needs no differencing unless the point itself must be avoided.
    if n == 0 and method != 'quad' and not options.get('singular'):
        return f(ctx.convert(x))
    saved_prec = ctx.prec
    try:
        if method == 'step':
            # Finite differences at elevated working precision.
            samples, step_norm, workprec = hsteps(ctx, f, x, n, saved_prec, **options)
            ctx.prec = workprec
            result = ctx.difference(samples, n) / step_norm**n
        elif method == 'quad':
            # Cauchy integral formula over a circle centered at x.
            ctx.prec += 10
            radius = ctx.convert(options.get('radius', 0.25))
            def integrand(t):
                rei = radius*ctx.expj(t)
                return f(x + rei) / rei**n
            contour = ctx.quadts(integrand, [0, 2*ctx.pi])
            result = contour * ctx.factorial(n) / (2*ctx.pi)
        else:
            raise ValueError("unknown method: %r" % method)
    finally:
        ctx.prec = saved_prec
    return +result
205
+
206
+ def _partial_diff(ctx, f, xs, orders, options):
207
+ if not orders:
208
+ return f()
209
+ if not sum(orders):
210
+ return f(*xs)
211
+ i = 0
212
+ for i in range(len(orders)):
213
+ if orders[i]:
214
+ break
215
+ order = orders[i]
216
+ def fdiff_inner(*f_args):
217
+ def inner(t):
218
+ return f(*(f_args[:i] + (t,) + f_args[i+1:]))
219
+ return ctx.diff(inner, f_args[i], order, **options)
220
+ orders[i] = 0
221
+ return _partial_diff(ctx, fdiff_inner, xs, orders, options)
222
+
223
+ @defun
224
def diffs(ctx, f, x, n=None, **options):
    r"""
    Return a generator yielding `f(x), f'(x), f''(x), \ldots`.

    With the default ``method='step'``, samples are shared between
    successive difference orders, so the first `k` derivatives cost only
    `O(k)` function evaluations versus roughly `O(k^2)` for `k` separate
    :func:`~mpmath.diff` calls. Passing a finite ``n`` stops the generator
    after the `n`-th derivative and lets it sample slightly more
    economically. Options are the same as for :func:`~mpmath.diff`.

    **Examples**

    >>> from mpmath import *
    >>> mp.dps = 15
    >>> nprint(list(diffs(cos, 1, 5)))
    [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]
    """
    n = ctx.inf if n is None else int(n)
    if options.get('method', 'step') != 'step':
        # Non-step methods gain nothing from shared samples; delegate.
        k = 0
        while k < n + 1:
            yield ctx.diff(f, x, k, **options)
            k += 1
        return
    if options.get('singular'):
        yield ctx.diff(f, x, 0, singular=True)
    else:
        yield f(ctx.convert(x))
    if n < 1:
        return
    # Batch of difference orders [lo, hi) served by one set of samples;
    # the batch grows geometrically when n is unbounded.
    if n == ctx.inf:
        lo, hi = 1, 2
    else:
        lo, hi = 1, n+1
    while 1:
        callprec = ctx.prec
        samples, norm, workprec = hsteps(ctx, f, x, hi, callprec, **options)
        for k in xrange(lo, hi):
            try:
                ctx.prec = workprec
                d = ctx.difference(samples, k) / norm**k
            finally:
                ctx.prec = callprec
            yield +d
            if k >= n:
                return
        lo, hi = hi, int(lo*1.4+1)
        hi = min(hi, n)
296
+
297
def iterable_to_function(gen):
    """Wrap an iterator as a memoized random-access function.

    The returned function ``f(k)`` gives the ``k``-th item of ``gen``
    (0-based), pulling and caching further items on demand.
    """
    it = iter(gen)
    cache = []
    def f(k):
        while len(cache) <= k:
            cache.append(next(it))
        return cache[k]
    return f
305
+
306
+ @defun
307
def diffs_prod(ctx, factors):
    r"""
    Given a list of `N` iterables or generators, each yielding
    `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
    generate `g(x), g'(x), g''(x), \ldots` where
    `g(x) = f_1(x) f_2(x) \cdots f_N(x)`, via the Leibniz rule.

    At high precision and for large orders this is typically more
    efficient than numerical differentiation when the derivatives of each
    `f_k(x)` admit direct computation. Note: no guard precision is added
    internally; raise the working precision externally for full accuracy.

    **Examples**

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> u = diffs(lambda x: exp(x)*cos(x)*sin(x), 1)
    >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)])
    >>> next(u); next(v)
    1.23586333600241
    1.23586333600241
    >>> next(u); next(v)
    0.104658952245596
    0.104658952245596
    """
    count = len(factors)
    if count == 1:
        # Single factor: its derivative stream is already the answer.
        for term in factors[0]:
            yield term
    else:
        # Split in half and combine the halves with the Leibniz rule.
        left = iterable_to_function(ctx.diffs_prod(factors[:count//2]))
        right = iterable_to_function(ctx.diffs_prod(factors[count//2:]))
        order = 0
        while 1:
            # sum_k binomial(order,k) * left^(order-k) * right^(k), with the
            # binomial maintained incrementally in exact integer arithmetic.
            total = left(order) * right(0)
            binom = 1
            for k in xrange(1, order+1):
                binom = binom * (order-k+1) // k
                total += binom * left(order-k) * right(k)
            yield total
            order += 1
359
+
360
def dpoly(n, _cache={}):
    """
    n-th differentiation polynomial for exp (Faa di Bruno's formula),
    represented as a dict mapping exponent tuples (powers of f', f'', ...)
    to integer coefficients. Results are memoized in ``_cache``; the shared
    mutable default argument is intentional.

    TODO: most exponents are zero, so maybe a sparse representation
    would be better.
    """
    # Seed the base case BEFORE the cache lookup: the original ordering
    # made a cold dpoly(0) call recurse forever through negative orders.
    if not _cache:
        _cache[0] = {(0,): 1}
    if n in _cache:
        return _cache[n]
    # Extend each exponent tuple of the previous polynomial with a slot
    # for the next-higher derivative of f.
    prev = dict((powers+(0,), coeff) for (powers, coeff) in dpoly(n-1).items())
    result = {}
    # d/dx of exp(f) contributes f' times each term: bump the first exponent.
    for powers, coeff in prev.items():
        bumped = (powers[0]+1,) + powers[1:]
        result[bumped] = result.get(bumped, 0) + coeff
    # Differentiating a factor (f^(k+1))^p yields p*(f^(k+1))^(p-1)*f^(k+2).
    for powers, coeff in prev.items():
        if not sum(powers):
            continue
        for k, p in enumerate(powers):
            if p:
                shifted = powers[:k] + (p-1, powers[k+1]+1) + powers[k+2:]
                result[shifted] = result.get(shifted, 0) + p*coeff
    _cache[n] = result
    return result
392
+
393
+ @defun
394
def diffs_exp(ctx, fdiffs):
    r"""
    Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots`,
    generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`,
    using Faa di Bruno's formula.

    At high precision and for large orders this is typically more
    efficient than numerical differentiation when the derivatives of
    `f(x)` admit direct computation. Note: no guard precision is added
    internally; raise the working precision externally for full accuracy.

    **Examples**

    Derivatives of the gamma function via logarithmic differentiation::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>>
        >>> def diffs_loggamma(x):
        ...     yield loggamma(x)
        ...     i = 0
        ...     while 1:
        ...         yield psi(i,x)
        ...         i += 1
        ...
        >>> u = diffs_exp(diffs_loggamma(3))
        >>> v = diffs(gamma, 3)
        >>> next(u); next(v)
        2.0
        2.0
        >>> next(u); next(v)
        1.84556867019693
        1.84556867019693
    """
    fderiv = iterable_to_function(fdiffs)
    g0 = ctx.exp(fderiv(0))
    yield g0
    order = 1
    while 1:
        # Each cached monomial maps an exponent tuple to an integer
        # coefficient; evaluate it on the derivatives of f and scale by g0.
        acc = ctx.mpf(0)
        for powers, coeff in iteritems(dpoly(order)):
            acc += coeff*ctx.fprod(fderiv(k+1)**p for (k, p) in enumerate(powers) if p)
        yield acc * g0
        order += 1
447
+
448
+ @defun
449
def differint(ctx, f, x, n=1, x0=0):
    r"""
    Calculate the Riemann-Liouville differintegral (fractional
    derivative), defined by

    .. math ::

        \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
        \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt

    where `f` is a (presumably well-behaved) function, `x` the evaluation
    point, `n` the order (any real or complex number), and `x_0` the
    reference point of integration; the auxiliary integer `m` is chosen
    automatically. Integer `n \ge 1` reproduces ordinary derivatives,
    while negative integer `n` gives iterated integration from `x_0`, so
    this operator interpolates continuously between the two.

    **Examples**

    Half-derivative (order 0.5) of a monomial, against the closed form::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> x = mpf(3); p = 2; n = 0.5
        >>> differint(lambda t: t**p, x, n)
        7.81764019044672
        >>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
        7.81764019044672
    """
    # Smallest admissible integer order m with m > Re(n), at least 1.
    m = max(int(ctx.ceil(ctx.re(n))) + 1, 1)
    expo = m - n - 1
    def iint(u):
        # Fractional integral of order m-n (up to the Gamma normalization).
        return ctx.quad(lambda t: (u-t)**expo * f(t), [x0, u])
    return ctx.diff(iint, x, m) / ctx.gamma(m-n)
520
+
521
@defun
def diffun(ctx, f, n=1, **options):
    r"""
    Given a function `f`, returns a function `g(x)` that evaluates the nth
    derivative `f^{(n)}(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> cos2 = diffun(sin)
        >>> sin2 = diffun(sin, 4)
        >>> cos(1.3), cos2(1.3)
        (0.267498828624587, 0.267498828624587)
        >>> sin(1.3), sin2(1.3)
        (0.963558185417193, 0.963558185417193)

    The function `f` must support arbitrary precision evaluation.
    See :func:`~mpmath.diff` for additional details and supported
    keyword options.
    """
    # The zeroth derivative is the function itself; avoid a wrapper.
    if not n:
        return f
    # Defer the actual differentiation until the returned function is called.
    return lambda point: ctx.diff(f, point, n, **options)
545
+
546
@defun
def taylor(ctx, f, x, n, **options):
    r"""
    Produces a degree-`n` Taylor polynomial around the point `x` of the
    given function `f`. The coefficients are returned as a list.

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(chop(taylor(sin, 0, 5)))
        [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]

    The coefficients are computed using high-order numerical
    differentiation. The function must be possible to evaluate
    to arbitrary precision. See :func:`~mpmath.diff` for additional details
    and supported keyword options.

    Note that to evaluate the Taylor polynomial as an approximation
    of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
    and the point of the Taylor expansion must be subtracted from
    the argument:

        >>> p = taylor(exp, 2.0, 10)
        >>> polyval(p[::-1], 2.5 - 2.0)
        12.1824939606092
        >>> exp(2.5)
        12.1824939607035

    """
    # By default, tiny imaginary/real parts are chopped from each derivative.
    apply_chop = options.get("chop", True)
    coefficients = []
    # k-th Taylor coefficient = f^(k)(x) / k!
    for k, derivative in enumerate(ctx.diffs(f, x, n, **options)):
        if apply_chop:
            derivative = ctx.chop(derivative)
        coefficients.append(derivative / ctx.factorial(k))
    return coefficients
579
+
580
@defun
def pade(ctx, a, L, M):
    r"""
    Computes a Pade approximation of degree `(L, M)` to a function.
    Given at least `L+M+1` Taylor coefficients `a` approximating
    a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
    polynomials `P, Q` satisfying

    .. math ::

        P = \sum_{k=0}^L p_k x^k

        Q = \sum_{k=0}^M q_k x^k

        Q_0 = 1

        A(x) Q(x) = P(x) + O(x^{L+M+1})

    `P(x)/Q(x)` can provide a good approximation to an analytic function
    beyond the radius of convergence of its Taylor series (example
    from G.A. Baker 'Essentials of Pade Approximants' Academic Press,
    Ch.1A)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> one = mpf(1)
        >>> def f(x):
        ...     return sqrt((one + 2*x)/(one + x))
        ...
        >>> a = taylor(f, 0, 6)
        >>> p, q = pade(a, 3, 3)
        >>> x = 10
        >>> polyval(p[::-1], x)/polyval(q[::-1], x)
        1.38169105566806
        >>> f(x)
        1.38169855941551

    """
    # Determining L+1 numerator and M denominator coefficients requires
    # at least L+M+1 Taylor coefficients of A.
    if len(a) < L+M+1:
        raise ValueError("L+M+1 Coefficients should be provided")

    # With a constant denominator the approximant is just the truncated series.
    if M == 0:
        return ([ctx.one], [ctx.one]) if L == 0 else (a[:L+1], [ctx.one])

    # Solve the Toeplitz-like linear system for q[1..M]:
    #   a[L]*q[1]     + ... + a[L-M+1]*q[M] = -a[L+1]
    #   ...
    #   a[L+M-1]*q[1] + ... + a[L]*q[M]     = -a[L+M]
    system = ctx.matrix(M)
    for row in range(M):
        for col in range(min(M, L + row + 1)):
            system[row, col] = a[L + row - col]
    rhs = -ctx.matrix(a[(L+1):(L+M+1)])
    solution = ctx.lu_solve(system, rhs)
    q = [ctx.one] + list(solution)

    # The numerator follows from the convolution p[i] = sum q[j]*a[i-j].
    p = []
    for i in range(L + 1):
        total = a[i]
        for j in range(1, min(M, i) + 1):
            total += q[j] * a[i - j]
        p.append(total)
    return p, q
lib/python3.11/site-packages/mpmath/calculus/extrapolation.py ADDED
@@ -0,0 +1,2115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ try:
2
+ from itertools import izip
3
+ except ImportError:
4
+ izip = zip
5
+
6
+ from ..libmp.backend import xrange
7
+ from .calculus import defun
8
+
9
+ try:
10
+ next = next
11
+ except NameError:
12
+ next = lambda _: _.next()
13
+
14
@defun
def richardson(ctx, seq):
    r"""
    Given a list ``seq`` of the first `N` elements of a slowly convergent
    infinite sequence, :func:`~mpmath.richardson` computes the `N`-term
    Richardson extrapolate for the limit.

    :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated
    limit and `c` is the magnitude of the largest weight used during the
    computation. The weight provides an estimate of the precision
    lost to cancellation. Due to cancellation effects, the sequence must
    typically be computed at a much higher precision than the target
    accuracy of the extrapolation.

    **Applicability and issues**

    The `N`-step Richardson extrapolation algorithm used by
    :func:`~mpmath.richardson` is described in [1].

    Richardson extrapolation only works for a specific type of sequence,
    namely one converging like partial sums of
    `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials.
    When the sequence does not converge at such a rate
    :func:`~mpmath.richardson` generally produces garbage.

    Richardson extrapolation has the advantage of being fast: the `N`-term
    extrapolate requires only `O(N)` arithmetic operations, and usually
    produces an estimate that is accurate to `O(N)` digits. Contrast with
    the Shanks transformation (see :func:`~mpmath.shanks`), which requires
    `O(N^2)` operations.

    :func:`~mpmath.richardson` is unable to produce an estimate for the
    approximation error. One way to estimate the error is to perform
    two extrapolations with slightly different `N` and comparing the
    results.

    Richardson extrapolation does not work for oscillating sequences.
    As a simple workaround, :func:`~mpmath.richardson` detects if the last
    three elements do not differ monotonically, and in that case
    applies extrapolation only to the even-index elements.

    **Example**

    Applying Richardson extrapolation to the Leibniz series for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
        ...     for m in range(1,30)]
        >>> v, c = richardson(S[:10])
        >>> v
        3.2126984126984126984126984127
        >>> nprint([v-pi, c])
        [0.0711058, 2.0]

        >>> v, c = richardson(S[:30])
        >>> v
        3.14159265468624052829954206226
        >>> nprint([v-pi, c])
        [1.09645e-9, 20833.3]

    **References**

    1. [BenderOrszag]_ pp. 375-376

    """
    if len(seq) < 3:
        raise ValueError("seq should be of minimum length 3")
    # Oscillation workaround: if the last three elements are not monotone,
    # extrapolate using only the even-index subsequence.
    if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]):
        seq = seq[::2]
    # Use the last N+1 elements seq[N], ..., seq[2N] of the sequence.
    N = len(seq)//2-1
    s = ctx.zero
    # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)!
    # To avoid repeated factorials, we simplify the quotient
    # of successive weights to obtain a recurrence relation
    c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N))
    maxc = 1
    for k in xrange(N+1):
        s += c * seq[N+k]
        # Track the largest weight magnitude: it estimates cancellation loss.
        maxc = max(abs(c), maxc)
        # Recurrence: advance c from weight k to weight k+1.
        c *= (k-N)*ctx.mpf(k+N+1)**N
        c /= ((1+k)*ctx.mpf(k+N)**N)
    return s, maxc
97
+
98
@defun
def shanks(ctx, seq, table=None, randomized=False):
    r"""
    Given a list ``seq`` of the first `N` elements of a slowly
    convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
    Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
    transformation often provides strong convergence acceleration,
    especially if the sequence is oscillating.

    The iterated Shanks transformation is computed using the Wynn
    epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
    epsilon table generated by Wynn's algorithm, which can be read
    off as follows:

    * The table is a list of lists forming a lower triangular matrix,
      where higher row and column indices correspond to more accurate
      values.
    * The columns with even index hold dummy entries (required for the
      computation) and the columns with odd index hold the actual
      extrapolates.
    * The last element in the last row is typically the most
      accurate estimate of the limit.
    * The difference to the third last element in the last row
      provides an estimate of the approximation error.
    * The magnitude of the second last element provides an estimate
      of the numerical accuracy lost to cancellation.

    For convenience, the extrapolation is stopped at an odd index
    so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
    limit.

    Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
    This can be used to efficiently extend a previous computation after
    new elements have been appended to the sequence. The table will
    then be updated in-place.

    **The Shanks transformation**

    The Shanks transformation is defined as follows (see [2]): given
    the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
    given by

    .. math ::

        S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}

    The Shanks transformation gives the exact limit `A_{\infty}` in a
    single step if `A_k = A + a q^k`. Note in particular that it
    extrapolates the exact sum of a geometric series in a single step.

    Applying the Shanks transformation once often improves convergence
    substantially for an arbitrary sequence, but the optimal effect is
    obtained by applying it iteratively:
    `S(S(A_k)), S(S(S(A_k))), \ldots`.

    Wynn's epsilon algorithm provides an efficient way to generate
    the table of iterated Shanks transformations. It reduces the
    computation of each element to essentially a single division, at
    the cost of requiring dummy elements in the table. See [1] for
    details.

    **Precision issues**

    Due to cancellation effects, the sequence must typically be
    computed at a much higher precision than the target accuracy
    of the extrapolation.

    If the Shanks transformation converges to the exact limit (such
    as if the sequence is a geometric series), then a division by
    zero occurs. By default, :func:`~mpmath.shanks` handles this case by
    terminating the iteration and returning the table it has
    generated so far. With *randomized=True*, it will instead
    replace the zero by a pseudorandom number close to zero.
    (TODO: find a better solution to this problem.)

    **Examples**

    We illustrate by applying Shanks transformation to the Leibniz
    series for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 50
        >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
        ...     for m in range(1,30)]
        >>>
        >>> T = shanks(S[:7])
        >>> for row in T:
        ...     nprint(row)
        ...
        [-0.75]
        [1.25, 3.16667]
        [-1.75, 3.13333, -28.75]
        [2.25, 3.14524, 82.25, 3.14234]
        [-2.75, 3.13968, -177.75, 3.14139, -969.937]
        [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]

    The extrapolated accuracy is about 4 digits, and about 4 digits
    may have been lost due to cancellation::

        >>> L = T[-1]
        >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
        [2.22532e-5, 4.78309e-5, 3515.06]

    Now we extend the computation::

        >>> T = shanks(S[:25], T)
        >>> L = T[-1]
        >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
        [3.75527e-19, 1.48478e-19, 2.96014e+17]

    The value for pi is now accurate to 18 digits. About 18 digits may
    also have been lost to cancellation.

    Here is an example with a geometric series, where the convergence
    is immediate (the sum is exactly 1)::

        >>> mp.dps = 15
        >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]):
        ...     nprint(row)
        [4.0]
        [8.0, 1.0]

    **References**

    1. [GravesMorris]_

    2. [BenderOrszag]_ pp. 368-375

    """
    if len(seq) < 2:
        raise ValueError("seq should be of minimum length 2")
    # When extending an existing table, resume where the last call stopped.
    if table:
        START = len(table)
    else:
        START = 0
        table = []
    STOP = len(seq) - 1
    # Stop at an odd index so table[-1][-1] is a genuine extrapolate.
    if STOP & 1:
        STOP -= 1
    one = ctx.one
    eps = +ctx.eps
    if randomized:
        from random import Random
        rnd = Random()
        # Seed deterministically so repeated runs are reproducible.
        rnd.seed(START)
    for i in xrange(START, STOP):
        row = []
        for j in xrange(i+1):
            if j == 0:
                # First column: a dummy zero and the forward difference.
                a, b = 0, seq[i+1]-seq[i]
            else:
                if j == 1:
                    a = seq[i]
                else:
                    a = table[i-1][j-2]
                b = row[j-1] - table[i-1][j-1]
            # b == 0 means the transformation hit the exact limit.
            if not b:
                if randomized:
                    # Perturb by a small pseudorandom multiple of eps.
                    b = (1 + rnd.getrandbits(10))*eps
                elif i & 1:
                    # Drop the incomplete even row so the table ends on
                    # an odd row with a valid extrapolate.
                    return table[:-1]
                else:
                    return table
            # Wynn epsilon recurrence: e_{j+1} = e_{j-1} + 1/(e_j - e'_j).
            row.append(a + one/b)
        table.append(row)
    return table
264
+
265
+
266
+ class levin_class:
267
+ # levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
268
+ r"""
269
+ This interface implements Levin's (nonlinear) sequence transformation for
270
+ convergence acceleration and summation of divergent series. It performs
271
+ better than the Shanks/Wynn-epsilon algorithm for logarithmic convergent
272
+ or alternating divergent series.
273
+
274
+ Let *A* be the series we want to sum:
275
+
276
+ .. math ::
277
+
278
+ A = \sum_{k=0}^{\infty} a_k
279
+
280
+ Attention: all `a_k` must be non-zero!
281
+
282
+ Let `s_n` be the partial sums of this series:
283
+
284
+ .. math ::
285
+
286
+ s_n = \sum_{k=0}^n a_k.
287
+
288
+ **Methods**
289
+
290
+ Calling ``levin`` returns an object with the following methods.
291
+
292
+ ``update(...)`` works with the list of individual terms `a_k` of *A*, and
293
+ ``update_step(...)`` works with the list of partial sums `s_k` of *A*:
294
+
295
+ .. code ::
296
+
297
+ v, e = ...update([a_0, a_1,..., a_k])
298
+ v, e = ...update_psum([s_0, s_1,..., s_k])
299
+
300
+ ``step(...)`` works with the individual terms `a_k` and ``step_psum(...)``
301
+ works with the partial sums `s_k`:
302
+
303
+ .. code ::
304
+
305
+ v, e = ...step(a_k)
306
+ v, e = ...step_psum(s_k)
307
+
308
+ *v* is the current estimate for *A*, and *e* is an error estimate which is
309
+ simply the difference between the current estimate and the last estimate.
310
+ One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``.
311
+
312
+ **A word of caution**
313
+
314
+ One can only hope for good results (i.e. convergence acceleration or
315
+ resummation) if the `s_n` have some well defind asymptotic behavior for
316
+ large `n` and are not erratic or random. Furthermore one usually needs very
317
+ high working precision because of the numerical cancellation. If the working
318
+ precision is insufficient, levin may produce silently numerical garbage.
319
+ Furthermore even if the Levin-transformation converges, in the general case
320
+ there is no proof that the result is mathematically sound. Only for very
321
+ special classes of problems one can prove that the Levin-transformation
322
+ converges to the expected result (for example Stieltjes-type integrals).
323
+ Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison
324
+ to Shanks/Wynn-epsilon, Richardson & co.
325
+ In summary one can say that the Levin-transformation is powerful but
326
+ unreliable and that it may need a copious amount of working precision.
327
+
328
+ The Levin transform has several variants differing in the choice of weights.
329
+ Some variants are better suited for the possible flavours of convergence
330
+ behaviour of *A* than other variants:
331
+
332
+ .. code ::
333
+
334
+ convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon
335
+
336
+ logarithmic + - + -
337
+ linear + + + +
338
+ alternating divergent + + + +
339
+
340
+ "+" means the variant is suitable, "-" means the variant is not suitable;
341
+ for comparison the Shanks/Wynn-epsilon transform is listed, too.
342
+
343
+ The variant is controlled through the variant keyword (i.e. ``variant="u"``,
344
+ ``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice.
345
+
346
+ Finally it is possible to use the Sidi-S transform instead of the Levin transform
347
+ by using the keyword ``method='sidi'``. The Sidi-S transform works better than the
348
+ Levin transformation for some divergent series (see the examples).
349
+
350
+ Parameters:
351
+
352
+ .. code ::
353
+
354
+ method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation
355
+ variant "u","t" or "v" chooses the weight variant.
356
+
357
+ The Levin transform is also accessible through the nsum interface.
358
+ ``method="l"`` or ``method="levin"`` select the normal Levin transform while
359
+ ``method="sidi"``
360
+ selects the Sidi-S transform. The variant is in both cases selected through the
361
+ levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise
362
+ it will miss the point where the Levin transform converges resulting in numerical
363
+ overflow/garbage. For highly divergent series a copious amount of working precision
364
+ must be chosen.
365
+
366
+ **Examples**
367
+
368
+ First we sum the zeta function::
369
+
370
+ >>> from mpmath import mp
371
+ >>> mp.prec = 53
372
+ >>> eps = mp.mpf(mp.eps)
373
+ >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
374
+ ... L = mp.levin(method = "levin", variant = "u")
375
+ ... S, s, n = [], 0, 1
376
+ ... while 1:
377
+ ... s += mp.one / (n * n)
378
+ ... n += 1
379
+ ... S.append(s)
380
+ ... v, e = L.update_psum(S)
381
+ ... if e < eps:
382
+ ... break
383
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
384
+ >>> print(mp.chop(v - mp.pi ** 2 / 6))
385
+ 0.0
386
+ >>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u")
387
+ >>> print(mp.chop(v - w))
388
+ 0.0
389
+
390
+ Now we sum the zeta function outside its range of convergence
391
+ (attention: This does not work at the negative integers!)::
392
+
393
+ >>> eps = mp.mpf(mp.eps)
394
+ >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
395
+ ... L = mp.levin(method = "levin", variant = "v")
396
+ ... A, n = [], 1
397
+ ... while 1:
398
+ ... s = mp.mpf(n) ** (2 + 3j)
399
+ ... n += 1
400
+ ... A.append(s)
401
+ ... v, e = L.update(A)
402
+ ... if e < eps:
403
+ ... break
404
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
405
+ >>> print(mp.chop(v - mp.zeta(-2-3j)))
406
+ 0.0
407
+ >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
408
+ >>> print(mp.chop(v - w))
409
+ 0.0
410
+
411
+ Now we sum the divergent asymptotic expansion of an integral related to the
412
+ exponential integral (see also [2] p.373). The Sidi-S transform works best here::
413
+
414
+ >>> z = mp.mpf(10)
415
+ >>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
416
+ >>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
417
+ >>> eps = mp.mpf(mp.eps)
418
+ >>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation
419
+ ... L = mp.levin(method = "sidi", variant = "t")
420
+ ... n = 0
421
+ ... while 1:
422
+ ... s = (-1)**n * mp.fac(n) * z ** (-n)
423
+ ... v, e = L.step(s)
424
+ ... n += 1
425
+ ... if e < eps:
426
+ ... break
427
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
428
+ >>> print(mp.chop(v - exact))
429
+ 0.0
430
+ >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
431
+ >>> print(mp.chop(v - w))
432
+ 0.0
433
+
434
+ Another highly divergent integral is also summable::
435
+
436
+ >>> z = mp.mpf(2)
437
+ >>> eps = mp.mpf(mp.eps)
438
+ >>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
439
+ >>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
440
+ >>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series
441
+ ... L = mp.levin(method = "levin", variant = "t")
442
+ ... n, s = 0, 0
443
+ ... while 1:
444
+ ... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
445
+ ... n += 1
446
+ ... v, e = L.step_psum(s)
447
+ ... if e < eps:
448
+ ... break
449
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
450
+ >>> print(mp.chop(v - exact))
451
+ 0.0
452
+ >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
453
+ ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
454
+ >>> print(mp.chop(v - w))
455
+ 0.0
456
+
457
+ These examples run with 15-20 decimal digits precision. For higher precision the
458
+ working precision must be raised.
459
+
460
+ **Examples for nsum**
461
+
462
+ Here we calculate Euler's constant as the constant term in the Laurent
463
+ expansion of `\zeta(s)` at `s=1`. This sum converges extremely slowly because of
464
+ the logarithmic convergence behaviour of the Dirichlet series for zeta::
465
+
466
+ >>> mp.dps = 30
467
+ >>> z = mp.mpf(10) ** (-10)
468
+ >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
469
+ >>> print(mp.chop(a - mp.euler, tol = 1e-10))
470
+ 0.0
471
+
472
+ The Sidi-S transform performs excellently for the alternating series of `\log(2)`::
473
+
474
+ >>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
475
+ >>> print(mp.chop(a - mp.log(2)))
476
+ 0.0
477
+
478
+ Hypergeometric series can also be summed outside their range of convergence.
479
+ The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the
480
+ point where the Levin transform converges resulting in numerical overflow/garbage::
481
+
482
+ >>> z = 2 + 1j
483
+ >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
484
+ >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
485
+ >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
486
+ >>> print(mp.chop(exact-v))
487
+ 0.0
488
+
489
+ References:
490
+
491
+ [1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of
492
+ Convergence and the Summation of Divergent Series" arXiv:math/0306302
493
+
494
+ [2] A. Sidi - "Practical Extrapolation Methods"
495
+
496
+ [3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209
497
+
498
+ """
499
+
500
+ def __init__(self, method = "levin", variant = "u"):
501
+ self.variant = variant
502
+ self.n = 0
503
+ self.a0 = 0
504
+ self.theta = 1
505
+ self.A = []
506
+ self.B = []
507
+ self.last = 0
508
+ self.last_s = False
509
+
510
+ if method == "levin":
511
+ self.factor = self.factor_levin
512
+ elif method == "sidi":
513
+ self.factor = self.factor_sidi
514
+ else:
515
+ raise ValueError("levin: unknown method \"%s\"" % method)
516
+
517
+ def factor_levin(self, i):
518
+ # original levin
519
+ # [1] p.50,e.7.5-7 (with n-j replaced by i)
520
+ return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1)
521
+
522
+ def factor_sidi(self, i):
523
+ # sidi analogon to levin (factorial series)
524
+ # [1] p.59,e.8.3-16 (with n-j replaced by i)
525
+ return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3))
526
+
527
    def run(self, s, a0, a1 = 0):
        """
        Feed one term into the transformation.

        s is the current partial sum, a0 the current term; a1 (the next
        term) is only used by the "v" variant. Updates the numerator
        array self.A and denominator array self.B in place and advances
        self.n. Raises ValueError if the variant weight is zero.
        """
        # Choose the remainder-estimate weight w for the selected variant.
        if self.variant=="t":
            # levin t
            w=a0
        elif self.variant=="u":
            # levin u
            w=a0*(self.theta+self.n)
        elif self.variant=="v":
            # levin v
            w=a0*a1/(a0-a1)
        else:
            assert False, "unknown variant"

        if w==0:
            raise ValueError("levin: zero weight")

        self.A.append(s/w)
        self.B.append(1/w)

        # Update both tables in place from the newest entry backwards;
        # the current estimate is always A[0]/B[0].
        for i in range(self.n-1,-1,-1):
            if i==self.n-1:
                f=1
            else:
                f=self.factor(i)

            self.A[i]=self.A[i+1]-f*self.A[i]
            self.B[i]=self.B[i+1]-f*self.B[i]

        self.n+=1
556
+
557
+ ###########################################################################
558
+
559
    def update_psum(self,S):
        """
        This routine applies the convergence acceleration to the list of partial sums.

        A = sum(a_k, k = 0..infinity)
        s_n = sum(a_k, k = 0..n)

        v, e = ...update_psum([s_0, s_1,..., s_k])

        output:
        v current estimate of the series A
        e an error estimate which is simply the difference between the current
        estimate and the last estimate.
        """

        if self.variant!="v":
            # Variants "t"/"u": each call consumes s_n with term a_n = s_n - s_{n-1}.
            if self.n==0:
                self.run(S[0],S[0])
            while self.n<len(S):
                self.run(S[self.n],S[self.n]-S[self.n-1])
        else:
            # Variant "v" needs the *next* term too, so it lags one element behind.
            if len(S)==1:
                self.last=0
                return S[0],abs(S[0])

            if self.n==0:
                self.a1=S[1]-S[0]
                self.run(S[0],S[0],self.a1)

            while self.n<len(S)-1:
                na1=S[self.n+1]-S[self.n]
                self.run(S[self.n],self.a1,na1)
                self.a1=na1

        # Current extrapolate; error estimate is the change since the last call.
        value=self.A[0]/self.B[0]
        err=abs(value-self.last)
        self.last=value

        return value,err
598
+
599
def update(self, X):
    """
    Apply the convergence acceleration to the list of individual terms.

    A = sum(a_k, k = 0..infinity)

    v, e = ...update([a_0, a_1,..., a_k])

    output:
    v    current estimate of the series A
    e    an error estimate which is simply the difference between the current
         estimate and the last estimate.
    """
    if self.variant != "v":
        if self.n == 0:
            self.s = X[0]
            self.run(self.s, X[0])
        while self.n < len(X):
            self.s += X[self.n]
            self.run(self.s, X[self.n])
    else:
        # The v variant consumes terms pairwise, so it lags one term behind.
        if len(X) == 1:
            self.last = 0
            return X[0], abs(X[0])
        if self.n == 0:
            self.s = X[0]
            self.run(self.s, X[0], X[1])
        while self.n < len(X) - 1:
            self.s += X[self.n]
            self.run(self.s, X[self.n], X[self.n + 1])

    estimate = self.A[0] / self.B[0]
    delta = abs(estimate - self.last)
    self.last = estimate

    return estimate, delta
638
+
639
+ ###########################################################################
640
+
641
def step_psum(self, s):
    """
    Apply the convergence acceleration to one new partial sum.

    A = sum(a_k, k = 0..infinity)
    s_n = sum(a_k, k = 0..n)

    v, e = ...step_psum(s_k)

    output:
    v    current estimate of the series A
    e    an error estimate which is simply the difference between the current
         estimate and the last estimate.
    """
    if self.variant != "v":
        if self.n == 0:
            self.last_s = s
            self.run(s, s)
        else:
            self.run(s, s - self.last_s)
            self.last_s = s
    else:
        # last_s is initialized to a bool sentinel; a bool therefore marks
        # the very first call, which only records state.
        if isinstance(self.last_s, bool):
            self.last_s = s
            self.last_w = s
            self.last = 0
            return s, abs(s)
        fresh = s - self.last_s
        self.run(self.last_s, self.last_w, fresh)
        self.last_w = fresh
        self.last_s = s

    estimate = self.A[0] / self.B[0]
    delta = abs(estimate - self.last)
    self.last = estimate

    return estimate, delta
680
+
681
def step(self, x):
    """
    Apply the convergence acceleration to one new individual term.

    A = sum(a_k, k = 0..infinity)

    v, e = ...step(a_k)

    output:
    v    current estimate of the series A
    e    an error estimate which is simply the difference between the current
         estimate and the last estimate.
    """
    if self.variant != "v":
        # Maintain the running partial sum, then feed it in.
        if self.n == 0:
            self.s = x
        else:
            self.s += x
        self.run(self.s, x)
    else:
        # last_s is initialized to a bool sentinel; a bool therefore marks
        # the very first call, which only records state.
        if isinstance(self.last_s, bool):
            self.last_s = x
            self.s = 0
            self.last = 0
            return x, abs(x)
        self.s += self.last_s
        self.run(self.s, self.last_s, x)
        self.last_s = x

    estimate = self.A[0] / self.B[0]
    delta = abs(estimate - self.last)
    self.last = estimate

    return estimate, delta
718
+
719
def levin(ctx, method="levin", variant="u"):
    # Factory: build a Levin/Sidi accelerator bound to the given context.
    transform = levin_class(method=method, variant=variant)
    transform.ctx = ctx
    return transform

levin.__doc__ = levin_class.__doc__
defun(levin)
726
+
727
+
728
+ class cohen_alt_class:
729
+ # cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
730
+ r"""
731
+ This interface implements the convergence acceleration of alternating series
732
+ as described in H. Cohen, F.R. Villegas, D. Zagier - "Convergence Acceleration
733
+ of Alternating Series". This series transformation works only well if the
734
+ individual terms of the series have an alternating sign. It belongs to the
735
+ class of linear series transformations (in contrast to the Shanks/Wynn-epsilon
736
+ or Levin transform). This series transformation is also able to sum some types
737
+ of divergent series. See the paper under which conditions this resummation is
738
+ mathematical sound.
739
+
740
+ Let *A* be the series we want to sum:
741
+
742
+ .. math ::
743
+
744
+ A = \sum_{k=0}^{\infty} a_k
745
+
746
+ Let `s_n` be the partial sums of this series:
747
+
748
+ .. math ::
749
+
750
+ s_n = \sum_{k=0}^n a_k.
751
+
752
+
753
+ **Interface**
754
+
755
+ Calling ``cohen_alt`` returns an object with the following methods.
756
+
757
+ Then ``update(...)`` works with the list of individual terms `a_k` and
758
+ ``update_psum(...)`` works with the list of partial sums `s_k`:
759
+
760
+ .. code ::
761
+
762
+ v, e = ...update([a_0, a_1,..., a_k])
763
+ v, e = ...update_psum([s_0, s_1,..., s_k])
764
+
765
+ *v* is the current estimate for *A*, and *e* is an error estimate which is
766
+ simply the difference between the current estimate and the last estimate.
767
+
768
+ **Examples**
769
+
770
+ Here we compute the alternating zeta function using ``update_psum``::
771
+
772
+ >>> from mpmath import mp
773
+ >>> AC = mp.cohen_alt()
774
+ >>> S, s, n = [], 0, 1
775
+ >>> while 1:
776
+ ... s += -((-1) ** n) * mp.one / (n * n)
777
+ ... n += 1
778
+ ... S.append(s)
779
+ ... v, e = AC.update_psum(S)
780
+ ... if e < mp.eps:
781
+ ... break
782
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
783
+ >>> print(mp.chop(v - mp.pi ** 2 / 12))
784
+ 0.0
785
+
786
+ Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`::
787
+
788
+ >>> A = []
789
+ >>> AC = mp.cohen_alt()
790
+ >>> n = 1
791
+ >>> while 1:
792
+ ... A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
793
+ ... A.append(-mp.loggamma(1 + mp.one / (2 * n)))
794
+ ... n += 1
795
+ ... v, e = AC.update(A)
796
+ ... if e < mp.eps:
797
+ ... break
798
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
799
+ >>> v = mp.exp(v)
800
+ >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12))
801
+ 0.0
802
+
803
+ ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface::
804
+
805
+ >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
806
+ >>> print(mp.chop(v - mp.log(2)))
807
+ 0.0
808
+ >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a")
809
+ >>> print(mp.chop(v - mp.pi / 4))
810
+ 0.0
811
+ >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
812
+ >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
813
+ 0.0
814
+
815
+ """
816
+
817
+ def __init__(self):
818
+ self.last=0
819
+
820
+ def update(self, A):
821
+ """
822
+ This routine applies the convergence acceleration to the list of individual terms.
823
+
824
+ A = sum(a_k, k = 0..infinity)
825
+
826
+ v, e = ...update([a_0, a_1,..., a_k])
827
+
828
+ output:
829
+ v current estimate of the series A
830
+ e an error estimate which is simply the difference between the current
831
+ estimate and the last estimate.
832
+ """
833
+
834
+ n = len(A)
835
+ d = (3 + self.ctx.sqrt(8)) ** n
836
+ d = (d + 1 / d) / 2
837
+ b = -self.ctx.one
838
+ c = -d
839
+ s = 0
840
+
841
+ for k in xrange(n):
842
+ c = b - c
843
+ if k % 2 == 0:
844
+ s = s + c * A[k]
845
+ else:
846
+ s = s - c * A[k]
847
+ b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))
848
+
849
+ value = s / d
850
+
851
+ err = abs(value - self.last)
852
+ self.last = value
853
+
854
+ return value, err
855
+
856
+ def update_psum(self, S):
857
+ """
858
+ This routine applies the convergence acceleration to the list of partial sums.
859
+
860
+ A = sum(a_k, k = 0..infinity)
861
+ s_n = sum(a_k ,k = 0..n)
862
+
863
+ v, e = ...update_psum([s_0, s_1,..., s_k])
864
+
865
+ output:
866
+ v current estimate of the series A
867
+ e an error estimate which is simply the difference between the current
868
+ estimate and the last estimate.
869
+ """
870
+
871
+ n = len(S)
872
+ d = (3 + self.ctx.sqrt(8)) ** n
873
+ d = (d + 1 / d) / 2
874
+ b = self.ctx.one
875
+ s = 0
876
+
877
+ for k in xrange(n):
878
+ b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one))
879
+ s += b * S[k]
880
+
881
+ value = s / d
882
+
883
+ err = abs(value - self.last)
884
+ self.last = value
885
+
886
+ return value, err
887
+
888
def cohen_alt(ctx):
    # Factory: build an alternating-series accelerator bound to the context.
    accelerator = cohen_alt_class()
    accelerator.ctx = ctx
    return accelerator

cohen_alt.__doc__ = cohen_alt_class.__doc__
defun(cohen_alt)
895
+
896
+
897
@defun
def sumap(ctx, f, interval, integral=None, error=False):
    r"""
    Evaluates an infinite series of an analytic summand *f* using the
    Abel-Plana formula

    .. math ::

        \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) +
            i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt.

    Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`),
    the Abel-Plana formula does not require derivatives. However,
    it only works when `|f(it)-f(-it)|` does not
    increase too rapidly with `t`.

    **Examples**

    The Abel-Plana formula is particularly useful when the summand
    decreases like a power of `k`; for example when the sum is a pure
    zeta function::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> sumap(lambda k: 1/k**2.5, [1,inf])
        1.34148725725091717975677
        >>> zeta(2.5)
        1.34148725725091717975677
        >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf])
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)
        >>> zeta(2.5+2.5j, 1+1j)
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)

    If the series is alternating, numerical quadrature along the real
    line is likely to give poor results, so it is better to evaluate
    the first term symbolically whenever possible:

        >>> n=3; z=-0.75
        >>> I = expint(n,-log(z))
        >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I))
        -0.6917036036904594510141448
        >>> polylog(n,z)
        -0.6917036036904594510141448

    """
    orig_prec = ctx.prec
    try:
        ctx.prec += 10
        a, b = interval
        if b != ctx.inf:
            raise ValueError("b should be equal to ctx.inf")

        # Shift the summand so the sum effectively starts at k = 0.
        def g(x):
            return f(x + a)

        # First Abel-Plana piece: the integral of f along the real axis.
        if integral is None:
            main, main_err = ctx.quad(g, [0, ctx.inf], error=True)
        else:
            main, main_err = integral, 0

        j = ctx.j
        twopi = ctx.pi * 2
        # Second piece: the correction integral along the imaginary axis.
        # For real summands, i*(g(it)-g(-it)) reduces to -2*Im(g(it)).
        if ctx._is_real_type(main):
            def h(t):
                return -2 * ctx.im(g(j * t)) / ctx.expm1(twopi * t)
        else:
            def h(t):
                return j * (g(j * t) - g(-j * t)) / ctx.expm1(twopi * t)
        corr, corr_err = ctx.quad(h, [0, ctx.inf], error=True)

        err = main_err + corr_err
        v = main + corr + 0.5 * g(ctx.mpf(0))
    finally:
        ctx.prec = orig_prec
    if error:
        return +v, err
    return +v
967
+
968
+
969
@defun
def sumem(ctx, f, interval, tol=None, reject=10, integral=None,
    adiffs=None, bdiffs=None, verbose=False, error=False,
    _fast_abort=False):
    r"""
    Uses the Euler-Maclaurin formula to compute an approximation accurate
    to within ``tol`` (which defaults to the present epsilon) of the sum

    .. math ::

        S = \sum_{k=a}^b f(k)

    where `(a,b)` are given by ``interval`` and `a` or `b` may be
    infinite. The approximation is

    .. math ::

        S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} +
        \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!}
        \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right).

    The last sum in the Euler-Maclaurin formula is not generally
    convergent (a notable exception is if `f` is a polynomial, in
    which case Euler-Maclaurin actually gives an exact result).

    The summation is stopped as soon as the quotient between two
    consecutive terms falls below *reject*. That is, by default
    (*reject* = 10), the summation is continued as long as each
    term adds at least one decimal.

    Although not convergent, convergence to a given tolerance can
    often be "forced" if `b = \infty` by summing up to `a+N` and then
    applying the Euler-Maclaurin formula to the sum over the range
    `(a+N+1, \ldots, \infty)`. This procedure is implemented by
    :func:`~mpmath.nsum`.

    By default numerical quadrature and differentiation is used.
    If the symbolic values of the integral and endpoint derivatives
    are known, it is more efficient to pass the value of the
    integral explicitly as ``integral`` and the derivatives
    explicitly as ``adiffs`` and ``bdiffs``. The derivatives
    should be given as iterables that yield
    `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`).

    **Examples**

    Summation of an infinite series, with automatic and symbolic
    integral and derivative values (the second should be much faster)::

        >>> from mpmath import *
        >>> mp.dps = 50; mp.pretty = True
        >>> sumem(lambda n: 1/n**2, [32, inf])
        0.03174336652030209012658168043874142714132886413417
        >>> I = mpf(1)/32
        >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999))
        >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D)
        0.03174336652030209012658168043874142714132886413417

    An exact evaluation of a finite polynomial sum::

        >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000])
        10500155000624963999742499550000.0
        >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001)))
        10500155000624963999742499550000

    """
    # Default tolerance: the epsilon of the current working precision
    # (the unary + rounds it to the present precision).
    tol = tol or +ctx.eps
    interval = ctx._as_points(interval)
    a = ctx.convert(interval[0])
    b = ctx.convert(interval[-1])
    err = ctx.zero
    prev = 0
    # Cap for the dummy all-zero derivative generators used at infinite
    # endpoints, where the derivative corrections vanish.
    M = 10000
    if a == ctx.ninf: adiffs = (0 for n in xrange(M))
    else: adiffs = adiffs or ctx.diffs(f, a)
    if b == ctx.inf: bdiffs = (0 for n in xrange(M))
    else: bdiffs = bdiffs or ctx.diffs(f, b)
    orig = ctx.prec
    #verbose = 1
    try:
        ctx.prec += 10
        s = ctx.zero
        # Only odd k contribute: the k-th pair of derivatives feeds the
        # B_{k+1}/(k+1)! correction term (even-index Bernoulli numbers).
        for k, (da, db) in enumerate(izip(adiffs, bdiffs)):
            if k & 1:
                term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1)
                mag = abs(term)
                if verbose:
                    print("term", k, "magnitude =", ctx.nstr(mag))
                # Converged: the latest correction term is below tolerance.
                if k > 4 and mag < tol:
                    s += term
                    break
                # The asymptotic series has started to diverge: successive
                # terms no longer shrink by at least a factor of `reject`.
                elif k > 4 and abs(prev) / mag < reject:
                    err += mag
                    if _fast_abort:
                        # Caller asked for an immediate bail-out on failure.
                        return [s, (s, err)][error]
                    if verbose:
                        print("Failed to converge")
                    break
                else:
                    s += term
                prev = term
        # Endpoint correction
        if a != ctx.ninf: s += f(a)/2
        if b != ctx.inf: s += f(b)/2
        # Tail integral
        if verbose:
            print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b)))
        # NOTE(review): a symbolically supplied `integral` that equals zero is
        # treated as absent here (truthiness test) and triggers numerical
        # quadrature instead — confirm this is intended.
        if integral:
            s += integral
        else:
            integral, ierr = ctx.quad(f, interval, error=True)
            if verbose:
                print("Integration error:", ierr)
            s += integral
            err += ierr
    finally:
        # Always restore the caller's precision.
        ctx.prec = orig
    if error:
        return s, err
    else:
        return s
1090
+
1091
@defun
def adaptive_extrapolation(ctx, update, emfun, kwargs):
    # Driver shared by nsum & friends: repeatedly fetches batches of partial
    # sums via `update`, then tries each enabled acceleration method until one
    # reaches the target tolerance.  `emfun` supplies the Euler-Maclaurin tail
    # estimate; `kwargs` carries the user's nsum options.
    option = kwargs.get
    if ctx._fixed_precision:
        tol = option('tol', ctx.eps*2**10)
    else:
        tol = option('tol', ctx.eps/2**10)
    verbose = option('verbose', False)
    maxterms = option('maxterms', ctx.dps*10)
    # Methods are given as a '+'-separated string, e.g. "r+s".
    method = set(option('method', 'r+s').split('+'))
    # NOTE(review): 'skip' is read here but never used in this function.
    skip = option('skip', 0)
    steps = iter(option('steps', xrange(10, 10**9, 10)))
    strict = option('strict')
    #steps = (10 for i in xrange(1000))
    # Stateful accelerators (Levin/Sidi/Cohen) that are fed partial sums.
    summer=[]
    if 'd' in method or 'direct' in method:
        TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False
    else:
        TRY_RICHARDSON = ('r' in method) or ('richardson' in method)
        TRY_SHANKS = ('s' in method) or ('shanks' in method)
        TRY_EULER_MACLAURIN = ('e' in method) or \
            ('euler-maclaurin' in method)

    def init_levin(m):
        # Instantiate one Levin-type accelerator per requested variant.
        variant = kwargs.get("levin_variant", "u")
        if isinstance(variant, str):
            if variant == "all":
                variant = ["u", "v", "t"]
            else:
                variant = [variant]
        for s in variant:
            L = levin_class(method = m, variant = s)
            L.ctx = ctx
            L.name = m + "(" + s + ")"
            summer.append(L)

    if ('l' in method) or ('levin' in method):
        init_levin("levin")

    if ('sidi' in method):
        init_levin("sidi")

    if ('a' in method) or ('alternating' in method):
        L = cohen_alt_class()
        L.ctx = ctx
        L.name = "alternating"
        summer.append(L)

    last_richardson_value = 0
    shanks_table = []
    index = 0
    step = 10
    partial = []
    best = ctx.zero
    orig = ctx.prec
    try:
        # Extrapolation methods suffer heavy cancellation, so work at a
        # substantially increased precision unless the user overrides it.
        if 'workprec' in kwargs:
            ctx.prec = kwargs['workprec']
        elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0:
            ctx.prec = (ctx.prec+10) * 4
        else:
            ctx.prec += 30
        while 1:
            if index >= maxterms:
                break

            # Get new batch of terms
            try:
                step = next(steps)
            except StopIteration:
                # Keep reusing the last step size once the iterator is spent.
                pass
            if verbose:
                print("-"*70)
                print("Adding terms #%i-#%i" % (index, index+step))
            update(partial, xrange(index, index+step))
            index += step

            # Check direct error
            best = partial[-1]
            error = abs(best - partial[-2])
            if verbose:
                print("Direct error: %s" % ctx.nstr(error))
            if error <= tol:
                return best

            # Check each extrapolation method
            if TRY_RICHARDSON:
                value, maxc = ctx.richardson(partial)
                # Convergence
                richardson_error = abs(value - last_richardson_value)
                if verbose:
                    print("Richardson error: %s" % ctx.nstr(richardson_error))
                # Convergence
                if richardson_error <= tol:
                    return value
                last_richardson_value = value
                # Unreliable due to cancellation
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Richardson")
                    TRY_RICHARDSON = False
                if richardson_error < error:
                    error = richardson_error
                    best = value
            if TRY_SHANKS:
                shanks_table = ctx.shanks(partial, shanks_table, randomized=True)
                row = shanks_table[-1]
                if len(row) == 2:
                    # Too little data for an error estimate yet.
                    est1 = row[-1]
                    shanks_error = 0
                else:
                    est1, maxc, est2 = row[-1], abs(row[-2]), row[-3]
                    shanks_error = abs(est1-est2)
                if verbose:
                    print("Shanks error: %s" % ctx.nstr(shanks_error))
                if shanks_error <= tol:
                    return est1
                # Unreliable due to cancellation
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Shanks")
                    TRY_SHANKS = False
                if shanks_error < error:
                    error = shanks_error
                    best = est1
            # Levin / Sidi / Cohen accelerators consume the partial sums.
            for L in summer:
                est, lerror = L.update_psum(partial)
                if verbose:
                    print("%s error: %s" % (L.name, ctx.nstr(lerror)))
                if lerror <= tol:
                    return est
                if lerror < error:
                    error = lerror
                    best = est
            if TRY_EULER_MACLAURIN:
                # A sign flip between the two newest partial sums indicates an
                # alternating series, for which the quadrature step would fail.
                if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1):
                    if verbose:
                        print ("NOT using Euler-Maclaurin: the series appears"
                            " to be alternating, so numerical\n quadrature"
                            " will most likely fail")
                    TRY_EULER_MACLAURIN = False
                else:
                    value, em_error = emfun(index, tol)
                    value += partial[-1]
                    if verbose:
                        print("Euler-Maclaurin error: %s" % ctx.nstr(em_error))
                    if em_error <= tol:
                        return value
                    # `error` itself is recomputed from the direct terms at the
                    # top of the next iteration, so only `best` is kept here.
                    if em_error < error:
                        best = value
    finally:
        # Always restore the caller's precision.
        ctx.prec = orig
    if strict:
        raise ctx.NoConvergence
    if verbose:
        print("Warning: failed to converge to target accuracy")
    return best
1247
+
1248
+ @defun
1249
+ def nsum(ctx, f, *intervals, **options):
1250
+ r"""
1251
+ Computes the sum
1252
+
1253
+ .. math :: S = \sum_{k=a}^b f(k)
1254
+
1255
+ where `(a, b)` = *interval*, and where `a = -\infty` and/or
1256
+ `b = \infty` are allowed, or more generally
1257
+
1258
+ .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots
1259
+ \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n)
1260
+
1261
+ if multiple intervals are given.
1262
+
1263
+ Two examples of infinite series that can be summed by :func:`~mpmath.nsum`,
1264
+ where the first converges rapidly and the second converges slowly,
1265
+ are::
1266
+
1267
+ >>> from mpmath import *
1268
+ >>> mp.dps = 15; mp.pretty = True
1269
+ >>> nsum(lambda n: 1/fac(n), [0, inf])
1270
+ 2.71828182845905
1271
+ >>> nsum(lambda n: 1/n**2, [1, inf])
1272
+ 1.64493406684823
1273
+
1274
+ When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to
1275
+ accurately estimate the sums of slowly convergent series. If the series is
1276
+ finite, :func:`~mpmath.nsum` currently does not attempt to perform any
1277
+ extrapolation, and simply calls :func:`~mpmath.fsum`.
1278
+
1279
+ Multidimensional infinite series are reduced to a single-dimensional
1280
+ series over expanding hypercubes; if both infinite and finite dimensions
1281
+ are present, the finite ranges are moved innermost. For more advanced
1282
+ control over the summation order, use nested calls to :func:`~mpmath.nsum`,
1283
+ or manually rewrite the sum as a single-dimensional series.
1284
+
1285
+ **Options**
1286
+
1287
+ *tol*
1288
+ Desired maximum final error. Defaults roughly to the
1289
+ epsilon of the working precision.
1290
+
1291
+ *method*
1292
+ Which summation algorithm to use (described below).
1293
+ Default: ``'richardson+shanks'``.
1294
+
1295
+ *maxterms*
1296
+ Cancel after at most this many terms. Default: 10*dps.
1297
+
1298
+ *steps*
1299
+ An iterable giving the number of terms to add between
1300
+ each extrapolation attempt. The default sequence is
1301
+ [10, 20, 30, 40, ...]. For example, if you know that
1302
+ approximately 100 terms will be required, efficiency might be
1303
+ improved by setting this to [100, 10]. Then the first
1304
+ extrapolation will be performed after 100 terms, the second
1305
+ after 110, etc.
1306
+
1307
+ *verbose*
1308
+ Print details about progress.
1309
+
1310
+ *ignore*
1311
+ If enabled, any term that raises ``ArithmeticError``
1312
+ or ``ValueError`` (e.g. through division by zero) is replaced
1313
+ by a zero. This is convenient for lattice sums with
1314
+ a singular term near the origin.
1315
+
1316
+ **Methods**
1317
+
1318
+ Unfortunately, an algorithm that can efficiently sum any infinite
1319
+ series does not exist. :func:`~mpmath.nsum` implements several different
1320
+ algorithms that each work well in different cases. The *method*
1321
+ keyword argument selects a method.
1322
+
1323
+ The default method is ``'r+s'``, i.e. both Richardson extrapolation
1324
+ and Shanks transformation is attempted. A slower method that
1325
+ handles more cases is ``'r+s+e'``. For very high precision
1326
+ summation, or if the summation needs to be fast (for example if
1327
+ multiple sums need to be evaluated), it is a good idea to
1328
+ investigate which one method works best and only use that.
1329
+
1330
+ ``'richardson'`` / ``'r'``:
1331
+ Uses Richardson extrapolation. Provides useful extrapolation
1332
+ when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
1333
+ for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
1334
+ additional information.
1335
+
1336
+ ``'shanks'`` / ``'s'``:
1337
+ Uses Shanks transformation. Typically provides useful
1338
+ extrapolation when `f(k) \sim c^k` or when successive terms
1339
+ alternate signs. Is able to sum some divergent series.
1340
+ See :func:`~mpmath.shanks` for additional information.
1341
+
1342
+ ``'levin'`` / ``'l'``:
1343
+ Uses the Levin transformation. It performs better than the Shanks
1344
+ transformation for logarithmic convergent or alternating divergent
1345
+ series. The ``'levin_variant'``-keyword selects the variant. Valid
1346
+ choices are "u", "t", "v" and "all" whereby "all" uses all three
1347
+ u,t and v simultanously (This is good for performance comparison in
1348
+ conjunction with "verbose=True"). Instead of the Levin transform one can
1349
+ also use the Sidi-S transform by selecting the method ``'sidi'``.
1350
+ See :func:`~mpmath.levin` for additional details.
1351
+
1352
+ ``'alternating'`` / ``'a'``:
1353
+ This is the convergence acceleration of alternating series developped
1354
+ by Cohen, Villegras and Zagier.
1355
+ See :func:`~mpmath.cohen_alt` for additional details.
1356
+
1357
+ ``'euler-maclaurin'`` / ``'e'``:
1358
+ Uses the Euler-Maclaurin summation formula to approximate
1359
+ the remainder sum by an integral. This requires high-order
1360
+ numerical derivatives and numerical integration. The advantage
1361
+ of this algorithm is that it works regardless of the
1362
+ decay rate of `f`, as long as `f` is sufficiently smooth.
1363
+ See :func:`~mpmath.sumem` for additional information.
1364
+
1365
+ ``'direct'`` / ``'d'``:
1366
+ Does not perform any extrapolation. This can be used
1367
+ (and should only be used for) rapidly convergent series.
1368
+ The summation automatically stops when the terms
1369
+ decrease below the target tolerance.
1370
+
1371
+ **Basic examples**
1372
+
1373
+ A finite sum::
1374
+
1375
+ >>> nsum(lambda k: 1/k, [1, 6])
1376
+ 2.45
1377
+
1378
+ Summation of a series going to negative infinity and a doubly
1379
+ infinite series::
1380
+
1381
+ >>> nsum(lambda k: 1/k**2, [-inf, -1])
1382
+ 1.64493406684823
1383
+ >>> nsum(lambda k: 1/(1+k**2), [-inf, inf])
1384
+ 3.15334809493716
1385
+
1386
+ :func:`~mpmath.nsum` handles sums of complex numbers::
1387
+
1388
+ >>> nsum(lambda k: (0.5+0.25j)**k, [0, inf])
1389
+ (1.6 + 0.8j)
1390
+
1391
+ The following sum converges very rapidly, so it is most
1392
+ efficient to sum it by disabling convergence acceleration::
1393
+
1394
+ >>> mp.dps = 1000
1395
+ >>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf],
1396
+ ... method='direct')
1397
+ >>> b = (cos(1)+sin(1))/4
1398
+ >>> abs(a-b) < mpf('1e-998')
1399
+ True
1400
+
1401
+ **Examples with Richardson extrapolation**
1402
+
1403
+ Richardson extrapolation works well for sums over rational
1404
+ functions, as well as their alternating counterparts::
1405
+
1406
+ >>> mp.dps = 50
1407
+ >>> nsum(lambda k: 1 / k**3, [1, inf],
1408
+ ... method='richardson')
1409
+ 1.2020569031595942853997381615114499907649862923405
1410
+ >>> zeta(3)
1411
+ 1.2020569031595942853997381615114499907649862923405
1412
+
1413
+ >>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf],
1414
+ ... method='richardson')
1415
+ 2.9348022005446793094172454999380755676568497036204
1416
+ >>> pi**2/2-2
1417
+ 2.9348022005446793094172454999380755676568497036204
1418
+
1419
+ >>> nsum(lambda k: (-1)**k / k**3, [1, inf],
1420
+ ... method='richardson')
1421
+ -0.90154267736969571404980362113358749307373971925537
1422
+ >>> -3*zeta(3)/4
1423
+ -0.90154267736969571404980362113358749307373971925538
1424
+
1425
+ **Examples with Shanks transformation**
1426
+
1427
+ The Shanks transformation works well for geometric series
1428
+ and typically provides excellent acceleration for Taylor
1429
+ series near the border of their disk of convergence.
1430
+ Here we apply it to a series for `\log(2)`, which can be
1431
+ seen as the Taylor series for `\log(1+x)` with `x = 1`::
1432
+
1433
+ >>> nsum(lambda k: -(-1)**k/k, [1, inf],
1434
+ ... method='shanks')
1435
+ 0.69314718055994530941723212145817656807550013436025
1436
+ >>> log(2)
1437
+ 0.69314718055994530941723212145817656807550013436025
1438
+
1439
+ Here we apply it to a slowly convergent geometric series::
1440
+
1441
+ >>> nsum(lambda k: mpf('0.995')**k, [0, inf],
1442
+ ... method='shanks')
1443
+ 200.0
1444
+
1445
+ Finally, Shanks' method works very well for alternating series
1446
+ where `f(k) = (-1)^k g(k)`, and often does so regardless of
1447
+ the exact decay rate of `g(k)`::
1448
+
1449
+ >>> mp.dps = 15
1450
+ >>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf],
1451
+ ... method='shanks')
1452
+ 0.765147024625408
1453
+ >>> (2-sqrt(2))*zeta(1.5)/2
1454
+ 0.765147024625408
1455
+
1456
+ The following slowly convergent alternating series has no known
1457
+ closed-form value. Evaluating the sum a second time at higher
1458
+ precision indicates that the value is probably correct::
1459
+
1460
+ >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
1461
+ ... method='shanks')
1462
+ 0.924299897222939
1463
+ >>> mp.dps = 30
1464
+ >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
1465
+ ... method='shanks')
1466
+ 0.92429989722293885595957018136
1467
+
1468
+ **Examples with Levin transformation**
1469
+
1470
+ The following example calculates Euler's constant as the constant term in
1471
+ the Laurent expansion of zeta(s) at s=1. This sum converges extremly slow
1472
+ because of the logarithmic convergence behaviour of the Dirichlet series
1473
+ for zeta.
1474
+
1475
+ >>> mp.dps = 30
1476
+ >>> z = mp.mpf(10) ** (-10)
1477
+ >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z
1478
+ >>> print(mp.chop(a - mp.euler, tol = 1e-10))
1479
+ 0.0
1480
+
1481
+ Now we sum the zeta function outside its range of convergence
1482
+ (attention: This does not work at the negative integers!):
1483
+
1484
+ >>> mp.dps = 15
1485
+ >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
1486
+ >>> print(mp.chop(w - mp.zeta(-2-3j)))
1487
+ 0.0
1488
+
1489
+ The next example resummates an asymptotic series expansion of an integral
1490
+ related to the exponential integral.
1491
+
1492
+ >>> mp.dps = 15
1493
+ >>> z = mp.mpf(10)
1494
+ >>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
1495
+ >>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
1496
+ >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
1497
+ >>> print(mp.chop(w - exact))
1498
+ 0.0
1499
+
1500
+ Following highly divergent asymptotic expansion needs some care. Firstly we
1501
+ need copious amount of working precision. Secondly the stepsize must not be
1502
+ chosen to large, otherwise nsum may miss the point where the Levin transform
1503
+ converges and reach the point where only numerical garbage is produced due to
1504
+ numerical cancellation.
1505
+
1506
+ >>> mp.dps = 15
1507
+ >>> z = mp.mpf(2)
1508
+ >>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
1509
+ >>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
1510
+ >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
1511
+ ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
1512
+ >>> print(mp.chop(w - exact))
1513
+ 0.0
1514
+
1515
+ The hypergeoemtric function can also be summed outside its range of convergence:
1516
+
1517
+ >>> mp.dps = 15
1518
+ >>> z = 2 + 1j
1519
+ >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
1520
+ >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
1521
+ >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
1522
+ >>> print(mp.chop(exact-v))
1523
+ 0.0
1524
+
1525
+ **Examples with Cohen's alternating series resummation**
1526
+
1527
+ The next example sums the alternating zeta function:
1528
+
1529
+ >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
1530
+ >>> print(mp.chop(v - mp.log(2)))
1531
+ 0.0
1532
+
1533
+ The derivate of the alternating zeta function outside its range of
1534
+ convergence:
1535
+
1536
+ >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
1537
+ >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
1538
+ 0.0
1539
+
1540
+ **Examples with Euler-Maclaurin summation**
1541
+
1542
+ The sum in the following example has the wrong rate of convergence
1543
+ for either Richardson or Shanks to be effective.
1544
+
1545
+ >>> f = lambda k: log(k)/k**2.5
1546
+ >>> mp.dps = 15
1547
+ >>> nsum(f, [1, inf], method='euler-maclaurin')
1548
+ 0.38734195032621
1549
+ >>> -diff(zeta, 2.5)
1550
+ 0.38734195032621
1551
+
1552
+ Increasing ``steps`` improves speed at higher precision::
1553
+
1554
+ >>> mp.dps = 50
1555
+ >>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250])
1556
+ 0.38734195032620997271199237593105101319948228874688
1557
+ >>> -diff(zeta, 2.5)
1558
+ 0.38734195032620997271199237593105101319948228874688
1559
+
1560
+ **Divergent series**
1561
+
1562
+ The Shanks transformation is able to sum some *divergent*
1563
+ series. In particular, it is often able to sum Taylor series
1564
+ beyond their radius of convergence (this is due to a relation
1565
+ between the Shanks transformation and Pade approximations;
1566
+ see :func:`~mpmath.pade` for an alternative way to evaluate divergent
1567
+ Taylor series). Furthermore the Levin-transform examples above
1568
+ contain some divergent series resummation.
1569
+
1570
+ Here we apply it to `\log(1+x)` far outside the region of
1571
+ convergence::
1572
+
1573
+ >>> mp.dps = 50
1574
+ >>> nsum(lambda k: -(-9)**k/k, [1, inf],
1575
+ ... method='shanks')
1576
+ 2.3025850929940456840179914546843642076011014886288
1577
+ >>> log(10)
1578
+ 2.3025850929940456840179914546843642076011014886288
1579
+
1580
+ A particular type of divergent series that can be summed
1581
+ using the Shanks transformation is geometric series.
1582
+ The result is the same as using the closed-form formula
1583
+ for an infinite geometric series::
1584
+
1585
+ >>> mp.dps = 15
1586
+ >>> for n in range(-8, 8):
1587
+ ... if n == 1:
1588
+ ... continue
1589
+ ... print("%s %s %s" % (mpf(n), mpf(1)/(1-n),
1590
+ ... nsum(lambda k: n**k, [0, inf], method='shanks')))
1591
+ ...
1592
+ -8.0 0.111111111111111 0.111111111111111
1593
+ -7.0 0.125 0.125
1594
+ -6.0 0.142857142857143 0.142857142857143
1595
+ -5.0 0.166666666666667 0.166666666666667
1596
+ -4.0 0.2 0.2
1597
+ -3.0 0.25 0.25
1598
+ -2.0 0.333333333333333 0.333333333333333
1599
+ -1.0 0.5 0.5
1600
+ 0.0 1.0 1.0
1601
+ 2.0 -1.0 -1.0
1602
+ 3.0 -0.5 -0.5
1603
+ 4.0 -0.333333333333333 -0.333333333333333
1604
+ 5.0 -0.25 -0.25
1605
+ 6.0 -0.2 -0.2
1606
+ 7.0 -0.166666666666667 -0.166666666666667
1607
+
1608
+ **Multidimensional sums**
1609
+
1610
+ Any combination of finite and infinite ranges is allowed for the
1611
+ summation indices::
1612
+
1613
+ >>> mp.dps = 15
1614
+ >>> nsum(lambda x,y: x+y, [2,3], [4,5])
1615
+ 28.0
1616
+ >>> nsum(lambda x,y: x/2**y, [1,3], [1,inf])
1617
+ 6.0
1618
+ >>> nsum(lambda x,y: y/2**x, [1,inf], [1,3])
1619
+ 6.0
1620
+ >>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4])
1621
+ 7.0
1622
+ >>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf])
1623
+ 7.0
1624
+ >>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf])
1625
+ 7.0
1626
+
1627
+ Some nice examples of double series with analytic solutions or
1628
+ reductions to single-dimensional series (see [1])::
1629
+
1630
+ >>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf])
1631
+ 1.60669515241529
1632
+ >>> nsum(lambda n: 1/(2**n-1), [1,inf])
1633
+ 1.60669515241529
1634
+
1635
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf])
1636
+ 0.278070510848213
1637
+ >>> pi*(pi-3*ln2)/12
1638
+ 0.278070510848213
1639
+
1640
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf])
1641
+ 0.129319852864168
1642
+ >>> altzeta(2) - altzeta(1)
1643
+ 0.129319852864168
1644
+
1645
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf])
1646
+ 0.0790756439455825
1647
+ >>> altzeta(3) - altzeta(2)
1648
+ 0.0790756439455825
1649
+
1650
+ >>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)),
1651
+ ... [1,inf], [1,inf])
1652
+ 0.28125
1653
+ >>> mpf(9)/32
1654
+ 0.28125
1655
+
1656
+ >>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j),
1657
+ ... [1,inf], [1,inf], workprec=400)
1658
+ 1.64493406684823
1659
+ >>> zeta(2)
1660
+ 1.64493406684823
1661
+
1662
+ A hard example of a multidimensional sum is the Madelung constant
1663
+ in three dimensions (see [2]). The defining sum converges very
1664
+ slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to
1665
+ obtain an accurate value through convergence acceleration. The
1666
+ second evaluation below uses a much more efficient, rapidly
1667
+ convergent 2D sum::
1668
+
1669
+ >>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5,
1670
+ ... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True)
1671
+ -1.74756459463318
1672
+ >>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \
1673
+ ... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf])
1674
+ -1.74756459463318
1675
+
1676
+ Another example of a lattice sum in 2D::
1677
+
1678
+ >>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf],
1679
+ ... [-inf,inf], ignore=True)
1680
+ -2.1775860903036
1681
+ >>> -pi*ln2
1682
+ -2.1775860903036
1683
+
1684
+ An example of an Eisenstein series::
1685
+
1686
+ >>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf],
1687
+ ... ignore=True)
1688
+ (3.1512120021539 + 0.0j)
1689
+
1690
+ **References**
1691
+
1692
+ 1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html,
1693
+ 2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html
1694
+
1695
+ """
1696
+ infinite, g = standardize(ctx, f, intervals, options)
1697
+ if not infinite:
1698
+ return +g()
1699
+
1700
+ def update(partial_sums, indices):
1701
+ if partial_sums:
1702
+ psum = partial_sums[-1]
1703
+ else:
1704
+ psum = ctx.zero
1705
+ for k in indices:
1706
+ psum = psum + g(ctx.mpf(k))
1707
+ partial_sums.append(psum)
1708
+
1709
+ prec = ctx.prec
1710
+
1711
+ def emfun(point, tol):
1712
+ workprec = ctx.prec
1713
+ ctx.prec = prec + 10
1714
+ v = ctx.sumem(g, [point, ctx.inf], tol, error=1)
1715
+ ctx.prec = workprec
1716
+ return v
1717
+
1718
+ return +ctx.adaptive_extrapolation(update, emfun, options)
1719
+
1720
+
1721
def wrapsafe(f):
    """Wrap *f* so that ArithmeticError/ValueError in a term evaluate to 0.

    Used by nsum/nprod when the ``ignore`` option is set, so isolated
    singular terms do not abort the whole summation.
    """
    def guarded(*call_args):
        try:
            value = f(*call_args)
        except (ArithmeticError, ValueError):
            value = 0
        return value
    return guarded
1728
+
1729
def standardize(ctx, f, intervals, options):
    """Normalize a multi-dimensional summation problem.

    Splits the summation dimensions into finite and infinite ones,
    folds all finite dimensions into the summand, and maps the
    infinite dimensions onto a single index over [0, inf].

    Returns ``(infinite, g)``: when *infinite* is True an accelerated
    summation remains and ``g(k)`` gives the k-th (folded) term;
    otherwise ``g()`` already evaluates the complete finite sum.
    """
    if options.get("ignore"):
        f = wrapsafe(f)
    finite_dims = []
    infinite_dims = []
    for dim, endpoints in enumerate(intervals):
        lo, hi = ctx._as_points(endpoints)
        if hi < lo:
            # Empty interval: the whole sum is zero.
            return False, (lambda: ctx.zero)
        if lo == ctx.ninf or hi == ctx.inf:
            infinite_dims.append((dim, (lo, hi)))
        else:
            finite_dims.append((dim, (int(lo), int(hi))))
    if finite_dims:
        f = fold_finite(ctx, f, finite_dims)
    if not infinite_dims:
        # Everything was finite; the folded summand is the answer.
        return False, lambda: f(*([0]*len(intervals)))
    # Map each infinite dimension onto [0, inf], then collapse them
    # all into the leading infinite dimension.
    f = standardize_infinite(ctx, f, infinite_dims)
    f = fold_infinite(ctx, f, infinite_dims)
    args = [0] * len(intervals)
    lead = infinite_dims[0][0]
    def g(k):
        args[lead] = k
        return f(*args)
    return True, g
1755
+
1756
+ # backwards compatible itertools.product
1757
def cartesian_product(args):
    """Yield tuples of the Cartesian product of the iterables in *args*.

    Backwards-compatible stand-in for itertools.product; the product
    is materialized eagerly and then yielded in the same order.
    """
    pools = [tuple(pool) for pool in args]
    combos = [()]
    for pool in pools:
        combos = [combo + (item,) for combo in combos for item in pool]
    for combo in combos:
        yield combo
1764
+
1765
def fold_finite(ctx, f, intervals):
    """Sum *f* over all finite dimensions, returning a function of the rest.

    *intervals* is a list of ``(dim, (a, b))`` pairs with integer
    bounds; the returned callable evaluates the full finite sum for
    any fixed values of the remaining (infinite) dimensions.
    """
    if not intervals:
        return f
    dims = [item[0] for item in intervals]
    bounds = [item[1] for item in intervals]
    grids = [xrange(lo, hi+1) for (lo, hi) in bounds]
    def summed(*call_args):
        call_args = list(call_args)
        total = ctx.zero
        for combo in cartesian_product(grids):
            for dim, value in zip(dims, combo):
                call_args[dim] = ctx.mpf(value)
            total += f(*call_args)
        return total
    return summed
1781
+
1782
+ # Standardize each interval to [0,inf]
1783
def standardize_infinite(ctx, f, intervals):
    """Rewrite each infinite summation dimension so it runs over [0, inf].

    ``[a, inf)`` is shifted by ``a``, ``(-inf, b]`` is reflected about
    ``b``, and ``(-inf, inf)`` is folded by pairing the terms at ``+k``
    and ``-k``. Dimensions are processed recursively, last to first.
    """
    if not intervals:
        return f
    dim, (lo, hi) = intervals[-1]
    if lo == ctx.ninf and hi == ctx.inf:
        # Doubly infinite: term(k) -> term(k) + term(-k) for k != 0.
        def remapped(*call_args):
            call_args = list(call_args)
            k = call_args[dim]
            if not k:
                return f(*call_args)
            total = f(*call_args)
            call_args[dim] = -k
            total += f(*call_args)
            return total
    elif lo == ctx.ninf:
        # (-inf, hi]: reflect so index 0 corresponds to hi.
        def remapped(*call_args):
            call_args = list(call_args)
            call_args[dim] = hi - call_args[dim]
            return f(*call_args)
    else:
        # [lo, inf): shift so index 0 corresponds to lo.
        def remapped(*call_args):
            call_args = list(call_args)
            call_args[dim] += lo
            return f(*call_args)
    return standardize_infinite(ctx, remapped, intervals[:-1])
1811
+
1812
def fold_infinite(ctx, f, intervals):
    """Collapse two [0, inf] summation dimensions into one.

    The returned function sums *f* along the L-shaped boundary of the
    n-th lattice square, so summing it over n in [0, inf] reproduces
    the double sum. Applied recursively until only one infinite
    dimension remains.
    """
    if len(intervals) < 2:
        return f
    dim_a = intervals[-2][0]
    dim_b = intervals[-1][0]
    # Assumes the intervals were already standardized to [0, inf].
    def shell(*call_args):
        call_args = list(call_args)
        n = int(call_args[dim_a])
        total = ctx.zero
        # Top edge of the shell: dim_b fixed at n, dim_a = 0..n.
        call_args[dim_b] = ctx.mpf(n)
        for i in xrange(n+1):
            call_args[dim_a] = ctx.mpf(i)
            total += f(*call_args)
        # Right edge: dim_a fixed at n, dim_b = 0..n-1.
        call_args[dim_a] = ctx.mpf(n)
        for j in xrange(n):
            call_args[dim_b] = ctx.mpf(j)
            total += f(*call_args)
        return total
    return fold_infinite(ctx, shell, intervals[:-1])
1835
+
1836
@defun
def nprod(ctx, f, interval, nsum=False, **kwargs):
    r"""
    Computes the product

    .. math ::

        P = \prod_{k=a}^b f(k)

    where `(a, b)` = *interval*, and where `a = -\infty` and/or
    `b = \infty` are allowed.

    By default, :func:`~mpmath.nprod` uses the same extrapolation methods as
    :func:`~mpmath.nsum`, except applied to the partial products rather than
    partial sums, and the same keyword options as for :func:`~mpmath.nsum` are
    supported. If ``nsum=True``, the product is instead computed via
    :func:`~mpmath.nsum` as

    .. math ::

        P = \exp\left( \sum_{k=a}^b \log(f(k)) \right).

    This is slower, but can sometimes yield better results. It is
    also required (and used automatically) when Euler-Maclaurin
    summation is requested.

    **Examples**

    A simple finite product::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> nprod(lambda k: k, [1, 4])
        24.0

    A large number of infinite products have known exact values,
    and can therefore be used as a reference. Most of the following
    examples are taken from MathWorld [1].

    A few infinite products with simple values are::

        >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf])
        3.141592653589793238462643
        >>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf])
        2.0
        >>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf])
        0.6666666666666666666666667
        >>> nprod(lambda k: (1-1/k**2), [2, inf])
        0.5

    Next, several more infinite products with more complicated
    values::

        >>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6)
        5.180668317897115748416626
        5.180668317897115748416626

        >>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi)
        0.2720290549821331629502366
        0.2720290549821331629502366

        >>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf])
        0.8480540493529003921296502
        >>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi))
        0.8480540493529003921296502

        >>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf])
        1.848936182858244485224927
        >>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi
        1.848936182858244485224927

        >>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi)
        0.9190194775937444301739244
        0.9190194775937444301739244

        >>> nprod(lambda k: (1-1/k**6), [2, inf])
        0.9826842777421925183244759
        >>> (1+cosh(pi*sqrt(3)))/(12*pi**2)
        0.9826842777421925183244759

        >>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi)
        1.838038955187488860347849
        1.838038955187488860347849

        >>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf])
        1.447255926890365298959138
        >>> exp(1+euler/2)/sqrt(2*pi)
        1.447255926890365298959138

    The following two products are equivalent and can be evaluated in
    terms of a Jacobi theta function. Pi can be replaced by any value
    (as long as convergence is preserved)::

        >>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf])
        0.3838451207481672404778686
        >>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf])
        0.3838451207481672404778686
        >>> jtheta(4,0,1/pi)
        0.3838451207481672404778686

    This product does not have a known closed form value::

        >>> nprod(lambda k: (1-1/2**k), [1, inf])
        0.2887880950866024212788997

    A product taken from `-\infty`::

        >>> nprod(lambda k: 1-k**(-3), [-inf,-2])
        0.8093965973662901095786805
        >>> cosh(pi*sqrt(3)/2)/(3*pi)
        0.8093965973662901095786805

    A doubly infinite product::

        >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf])
        23.41432688231864337420035
        >>> exp(pi/tanh(pi))
        23.41432688231864337420035

    A product requiring the use of Euler-Maclaurin summation to compute
    an accurate value::

        >>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e')
        0.696155111336231052898125

    **References**

    1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html

    """
    # Compute via exp(sum(log f)) when requested, or when the
    # Euler-Maclaurin method is selected (it only applies to sums).
    if nsum or ('e' in kwargs.get('method', '')):
        orig = ctx.prec
        try:
            # TODO: we are evaluating log(1+eps) -> eps, which is
            # inaccurate. This currently works because nsum greatly
            # increases the working precision. But we should be
            # more intelligent and handle the precision here.
            ctx.prec += 10
            v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs)
        finally:
            # Restore the caller's precision even if nsum raises.
            ctx.prec = orig
        return +ctx.exp(v)

    a, b = ctx._as_points(interval)
    if a == ctx.ninf:
        if b == ctx.inf:
            # Doubly infinite: pair f(-k)*f(k), with f(0) factored out.
            return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs)
        # Reflect (-inf, b] onto [-b, inf) and recurse.
        return ctx.nprod(f, [-b, ctx.inf], **kwargs)
    elif b != ctx.inf:
        # Finite product: multiply the terms directly.
        return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1))

    a = int(a)

    def update(partial_products, indices):
        # Extend the sequence of partial products with terms at `indices`;
        # adaptive_extrapolation accelerates this sequence.
        if partial_products:
            pprod = partial_products[-1]
        else:
            pprod = ctx.one
        for k in indices:
            pprod = pprod * f(a + ctx.mpf(k))
        partial_products.append(pprod)

    return +ctx.adaptive_extrapolation(update, None, kwargs)
1999
+
2000
+
2001
@defun
def limit(ctx, f, x, direction=1, exp=False, **kwargs):
    r"""
    Computes an estimate of the limit

    .. math ::

        \lim_{t \to x} f(t)

    where `x` may be finite or infinite.

    For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for
    consecutive integer values of `n`, where the approach direction
    `d` may be specified using the *direction* keyword argument.
    For infinite `x`, :func:`~mpmath.limit` evaluates values of
    `f(\mathrm{sign}(x) \cdot n)`.

    If the approach to the limit is not sufficiently fast to give
    an accurate estimate directly, :func:`~mpmath.limit` attempts to find
    the limit using Richardson extrapolation or the Shanks
    transformation. You can select between these methods using
    the *method* keyword (see documentation of :func:`~mpmath.nsum` for
    more information).

    **Options**

    The following options are available with essentially the
    same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*,
    *steps*, *verbose*.

    If the option *exp=True* is set, `f` will be
    sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots`
    instead of the linearly spaced points `n = 1, 2, 3, \ldots`.
    This can sometimes improve the rate of convergence so that
    :func:`~mpmath.limit` may return a more accurate answer (and faster).
    However, do note that this can only be used if `f`
    supports fast and accurate evaluation for arguments that
    are extremely close to the limit point (or if infinite,
    very large arguments).

    **Examples**

    A basic evaluation of a removable singularity::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> limit(lambda x: (x-sin(x))/x**3, 0)
        0.166666666666666666666666666667

    Computing the exponential function using its limit definition::

        >>> limit(lambda n: (1+3/n)**n, inf)
        20.0855369231876677409285296546
        >>> exp(3)
        20.0855369231876677409285296546

    A limit for `\pi`::

        >>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2
        >>> limit(f, inf)
        3.14159265358979323846264338328

    Calculating the coefficient in Stirling's formula::

        >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf)
        2.50662827463100050241576528481
        >>> sqrt(2*pi)
        2.50662827463100050241576528481

    Evaluating Euler's constant `\gamma` using the limit representation

    .. math ::

        \gamma = \lim_{n \rightarrow \infty } \left[ \left(
        \sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right]

    (which converges notoriously slowly)::

        >>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n)
        >>> limit(f, inf)
        0.577215664901532860606512090082
        >>> +euler
        0.577215664901532860606512090082

    With default settings, the following limit converges too slowly
    to be evaluated accurately. Changing to exponential sampling
    however gives a perfect result::

        >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x)
        >>> limit(f, inf)
        0.992831158558330281129249686491
        >>> limit(f, inf, exp=True)
        1.0

    """

    # Build the sampling sequence approaching the limit point.
    if ctx.isinf(x):
        direction = ctx.sign(x)
        def sample(k):
            return f(ctx.mpf(k+1)*direction)
    else:
        direction *= ctx.one
        def sample(k):
            return f(x + direction/(k+1))
    if exp:
        # Resample at exponentially spaced points 2, 4, 8, ...
        linear_sample = sample
        def sample(k):
            return linear_sample(2**k)

    def update(values, indices):
        values.extend(sample(k+1) for k in indices)

    # XXX: steps used by nsum don't work well
    kwargs.setdefault('steps', [10])

    return +ctx.adaptive_extrapolation(update, None, kwargs)
lib/python3.11/site-packages/mpmath/calculus/inverselaplace.py ADDED
@@ -0,0 +1,973 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # contributed to mpmath by Kristopher L. Kuhlman, February 2017
2
+ # contributed to mpmath by Guillermo Navas-Palencia, February 2022
3
+
4
class InverseLaplaceTransform(object):
    r"""
    Common infrastructure for numerical inverse Laplace transform
    algorithms.

    To implement a custom algorithm, subclass
    :class:`InverseLaplaceTransform`, override the two methods below,
    and pass the subclass to :func:`~mpmath.invertlaplace` via its
    *method* argument.
    """

    def __init__(self, ctx):
        # Keep a reference to the calling context (e.g. mp or fp).
        self.ctx = ctx

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        Compute the vector of Laplace-parameter values (abscissa)
        required by the algorithm. The abscissa depend on the chosen
        algorithm (de Hoog is the default), any algorithm-specific
        keyword arguments, and the requested time *t*.
        """
        raise NotImplementedError

    def calc_time_domain_solution(self, fp):
        r"""
        Combine the Laplace-space function evaluations *fp* (taken at
        the abscissa produced by :meth:`calc_laplace_parameter`) into
        a time-domain value. Abscissa computed for one algorithm are
        typically not reusable by another.
        """
        raise NotImplementedError
37
+
38
+
39
class FixedTalbot(InverseLaplaceTransform):
    r"""
    "Fixed" Talbot method (Abate & Valko, 2004): the Bromwich contour
    is deformed into a parabola opening towards `-\infty`, with no
    user-tunable contour parameters beyond the optional *r*.

    Note: ``calc_laplace_parameter`` changes the calling context's
    ``dps`` as a side effect; it is restored by
    ``calc_time_domain_solution`` unless *manual_prec* is set.
    """

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The "fixed" Talbot method deforms the Bromwich contour towards
        `-\infty` in the shape of a parabola. Traditionally the Talbot
        algorithm has adjustable parameters, but the "fixed" version
        does not. The `r` parameter could be passed in as a parameter,
        if you want to override the default given by (Abate & Valko,
        2004).

        The Laplace parameter is sampled along a parabola opening
        along the negative imaginary axis, with the base of the
        parabola along the real axis at
        `p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in
        the approximation (degree) grows, the abscissa required for
        function evaluation tend towards `-\infty`, requiring high
        precision to prevent overflow. If any poles, branch cuts or
        other singularities exist such that the deformed Bromwich
        contour lies to the left of the singularity, the method will
        fail.

        **Optional arguments**

        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
        recognizes the following keywords

        *tmax*
            maximum time associated with vector of times
            (typically just the time requested)
        *degree*
            integer order of approximation (M = number of terms)
        *r*
            abscissa for `p_0` (otherwise computed using rule
            of thumb `2M/5`)

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_0=\frac{r}{t}

        .. math ::

            p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left(
            \frac{i\pi}{M}\right) + j \right] \qquad 1\le i <M

        where `j=\sqrt{-1}`, `r=2M/5`, and `t_\mathrm{max}` is the
        maximum specified time.

        """

        # required
        # ------------------------------
        # time of desired approximation
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------
        # maximum time desired (used for scaling) default is requested
        # time.
        self.tmax = self.ctx.convert(kwargs.get('tmax', self.t))

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = self.degree
        else:
            self.dps_goal = int(1.72*self.ctx.dps)
            self.degree = max(12, int(1.38*self.dps_goal))

        M = self.degree

        # this is adjusting the dps of the calling context hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # Abate & Valko rule of thumb for r parameter
        self.r = kwargs.get('r', self.ctx.fraction(2, 5)*M)

        self.theta = self.ctx.linspace(0.0, self.ctx.pi, M+1)

        self.cot_theta = self.ctx.matrix(M, 1)
        self.cot_theta[0] = 0  # not used

        # all but time-dependent part of p
        self.delta = self.ctx.matrix(M, 1)
        self.delta[0] = self.r

        for i in range(1, M):
            self.cot_theta[i] = self.ctx.cot(self.theta[i])
            self.delta[i] = self.r*self.theta[i]*(self.cot_theta[i] + 1j)

        # scale by tmax to get the actual abscissa (the separate
        # matrix pre-allocation previously done here was dead code:
        # the division already produces a fresh matrix)
        self.p = self.delta/self.tmax

        # NB: p is complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""The fixed Talbot time-domain solution is computed from the
        Laplace-space function evaluations using

        .. math ::

            f(t,M)=\frac{2}{5t}\sum_{k=0}^{M-1}\Re \left[
            \gamma_k \bar{f}(p_k)\right]

        where

        .. math ::

            \gamma_0 = \frac{1}{2}e^{r}\bar{f}(p_0)

        .. math ::

            \gamma_k = e^{tp_k}\left\lbrace 1 + \frac{jk\pi}{M}\left[1 +
            \cot \left( \frac{k \pi}{M} \right)^2 \right] - j\cot\left(
            \frac{k \pi}{M}\right)\right \rbrace \qquad 1\le k<M.

        Again, `j=\sqrt{-1}`.

        Before calling this function, call
        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
        to set the parameters and compute the required coefficients.

        **References**

        1. Abate, J., P. Valko (2004). Multi-precision Laplace
           transform inversion. *International Journal for Numerical
           Methods in Engineering* 60:979-993,
           http://dx.doi.org/10.1002/nme.995
        2. Talbot, A. (1979). The accurate numerical inversion of
           Laplace transforms. *IMA Journal of Applied Mathematics*
           23(1):97, http://dx.doi.org/10.1093/imamat/23.1.97
        """

        # required
        # ------------------------------
        self.t = self.ctx.convert(t)

        # assume fp was computed from p matrix returned from
        # calc_laplace_parameter(), so is already a list or matrix of
        # mpmath 'mpc' types

        # these were computed in previous call to
        # calc_laplace_parameter()
        theta = self.theta
        delta = self.delta
        M = self.degree

        ans = self.ctx.matrix(M, 1)
        ans[0] = self.ctx.exp(delta[0])*fp[0]/2

        for i in range(1, M):
            ans[i] = self.ctx.exp(delta[i])*fp[i]*(
                1 + 1j*theta[i]*(1 + self.cot_theta[i]**2) -
                1j*self.cot_theta[i])

        result = self.ctx.fraction(2, 5)*self.ctx.fsum(ans)/self.t

        # setting dps back to value when calc_laplace_parameter was
        # called, unless flag is set.
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        # ignore any small imaginary part
        return result.real
216
+
217
+
218
+ # ****************************************
219
+
220
class Stehfest(InverseLaplaceTransform):
    r"""
    Gaver-Stehfest numerical inverse Laplace transform.

    Note: ``calc_laplace_parameter`` changes the calling context's
    ``dps`` as a side effect; it is restored by
    ``calc_time_domain_solution`` unless *manual_prec* is set.
    """

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        The Gaver-Stehfest method is a discrete approximation of the
        Widder-Post inversion algorithm, rather than a direct
        approximation of the Bromwich contour integral.

        The method uses abscissa along the real axis only, and
        therefore has issues inverting oscillatory functions (which
        have poles in pairs away from the real axis).

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_k = \frac{k \log 2}{t} \qquad 1 \le k \le M
        """

        # required
        # ------------------------------
        # time of desired approximation
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.38*self.degree)
        else:
            self.dps_goal = int(2.93*self.ctx.dps)
            self.degree = max(16, self.dps_goal)

        # _coeff routine requires even degree
        if self.degree % 2 > 0:
            self.degree += 1

        M = self.degree

        # this is adjusting the dps of the calling context
        # hopefully the caller doesn't monkey around with it
        # between calling this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # Salzer weights depend only on M and the (raised) precision.
        self.V = self._coeff()
        self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t

        # NB: p is real (mpf)

    def _coeff(self):
        r"""Salzer summation weights (aka, "Stehfest coefficients")
        only depend on the approximation order (M) and the precision"""

        M = self.degree
        M2 = int(M/2)  # checked earlier that M is even

        V = self.ctx.matrix(M, 1)

        # Salzer summation weights
        # get very large in magnitude and oscillate in sign,
        # if the precision is not high enough, there will be
        # catastrophic cancellation
        for k in range(1, M+1):
            z = self.ctx.matrix(min(k, M2)+1, 1)
            for j in range(int((k+1)/2), min(k, M2)+1):
                z[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/
                        (self.ctx.fac(M2-j)*self.ctx.fac(j)*
                         self.ctx.fac(j-1)*self.ctx.fac(k-j)*
                         self.ctx.fac(2*j-k)))
            V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(z)

        return V

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Compute time-domain Stehfest algorithm solution.

        .. math ::

            f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left(
            p_k \right)

        where

        .. math ::

            V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor}
            \frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \,
            \left(i-1 \right)! \, \left(k-i\right)! \, \left(2i-k \right)!}

        As the degree increases, the abscissa (`p_k`) only increase
        linearly towards `\infty`, but the Stehfest coefficients
        (`V_k`) alternate in sign and increase rapidly in magnitude,
        requiring high precision to prevent overflow or loss of
        significance when evaluating the sum.

        **References**

        1. Widder, D. (1941). *The Laplace Transform*. Princeton.
        2. Stehfest, H. (1970). Algorithm 368: numerical inversion of
           Laplace transforms. *Communications of the ACM* 13(1):47-49,
           http://dx.doi.org/10.1145/361953.361969

        """

        # required
        self.t = self.ctx.convert(t)

        # assume fp was computed from p matrix returned from
        # calc_laplace_parameter(), so is already
        # a list or matrix of mpmath 'mpf' types

        result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t

        # setting dps back to value when calc_laplace_parameter was called
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        # ignore any small imaginary part
        return result.real
350
+
351
+
352
+ # ****************************************
353
+
354
+ class deHoog(InverseLaplaceTransform):
355
+
356
+ def calc_laplace_parameter(self, t, **kwargs):
357
+ r"""the de Hoog, Knight & Stokes algorithm is an
358
+ accelerated form of the Fourier series numerical
359
+ inverse Laplace transform algorithms.
360
+
361
+ .. math ::
362
+
363
+ p_k = \gamma + \frac{jk}{T} \qquad 0 \le k < 2M+1
364
+
365
+ where
366
+
367
+ .. math ::
368
+
369
+ \gamma = \alpha - \frac{\log \mathrm{tol}}{2T},
370
+
371
+ `j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time,
372
+ `\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the
373
+ rightmost pole or singularity, which is chosen based on the
374
+ desired accuracy (assuming the rightmost singularity is 0),
375
+ and `\mathrm{tol}=10\alpha` is the desired tolerance, which is
376
+ chosen in relation to `\alpha`.`
377
+
378
+ When increasing the degree, the abscissa increase towards
379
+ `j\infty`, but more slowly than the fixed Talbot
380
+ algorithm. The de Hoog et al. algorithm typically does better
381
+ with oscillatory functions of time, and less well-behaved
382
+ functions. The method tends to be slower than the Talbot and
383
+ Stehfest algorithsm, especially so at very high precision
384
+ (e.g., `>500` digits precision).
385
+
386
+ """
387
+
388
+ # required
389
+ # ------------------------------
390
+ self.t = self.ctx.convert(t)
391
+
392
+ # optional
393
+ # ------------------------------
394
+ self.tmax = kwargs.get('tmax', self.t)
395
+
396
+ # empirical relationships used here based on a linear fit of
397
+ # requested and delivered dps for exponentially decaying time
398
+ # functions for requested dps up to 512.
399
+
400
+ if 'degree' in kwargs:
401
+ self.degree = kwargs['degree']
402
+ self.dps_goal = int(1.38*self.degree)
403
+ else:
404
+ self.dps_goal = int(self.ctx.dps*1.36)
405
+ self.degree = max(10, self.dps_goal)
406
+
407
+ # 2*M+1 terms in approximation
408
+ M = self.degree
409
+
410
+ # adjust alpha component of abscissa of convergence for higher
411
+ # precision
412
+ tmp = self.ctx.power(10.0, -self.dps_goal)
413
+ self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))
414
+
415
+ # desired tolerance (here simply related to alpha)
416
+ self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0))
417
+ self.np = 2*self.degree+1 # number of terms in approximation
418
+
419
+ # this is adjusting the dps of the calling context
420
+ # hopefully the caller doesn't monkey around with it
421
+ # between calling this routine and calc_time_domain_solution()
422
+ self.dps_orig = self.ctx.dps
423
+ self.ctx.dps = self.dps_goal
424
+
425
+ # scaling factor (likely tun-able, but 2 is typical)
426
+ self.scale = kwargs.get('scale', 2)
427
+ self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax))
428
+
429
+ self.p = self.ctx.matrix(2*M+1, 1)
430
+ self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T)
431
+ self.p = (self.gamma + self.ctx.pi*
432
+ self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j)
433
+
434
+ # NB: p is complex (mpc)
435
+
436
+ def calc_time_domain_solution(self, fp, t, manual_prec=False):
437
+ r"""Calculate time-domain solution for
438
+ de Hoog, Knight & Stokes algorithm.
439
+
440
+ The un-accelerated Fourier series approach is:
441
+
442
+ .. math ::
443
+
444
+ f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'}
445
+ \Re\left[\bar{f}\left( p_k \right)
446
+ e^{i\pi t/T} \right],
447
+
448
+ where the prime on the summation indicates the first term is halved.
449
+
450
+ This simplistic approach requires so many function evaluations
451
+ that it is not practical. Non-linear acceleration is
452
+ accomplished via Pade-approximation and an analytic expression
453
+ for the remainder of the continued fraction. See the original
454
+ paper (reference 2 below) a detailed description of the
455
+ numerical approach.
456
+
457
+ **References**
458
+
459
+ 1. Davies, B. (2005). *Integral Transforms and their
460
+ Applications*, Third Edition. Springer.
461
+ 2. de Hoog, F., J. Knight, A. Stokes (1982). An improved
462
+ method for numerical inversion of Laplace transforms. *SIAM
463
+ Journal of Scientific and Statistical Computing* 3:357-366,
464
+ http://dx.doi.org/10.1137/0903022
465
+
466
+ """
467
+
468
+ M = self.degree
469
+ np = self.np
470
+ T = self.T
471
+
472
+ self.t = self.ctx.convert(t)
473
+
474
+ # would it be useful to try re-using
475
+ # space between e&q and A&B?
476
+ e = self.ctx.zeros(np, M+1)
477
+ q = self.ctx.matrix(2*M, M)
478
+ d = self.ctx.matrix(np, 1)
479
+ A = self.ctx.zeros(np+1, 1)
480
+ B = self.ctx.ones(np+1, 1)
481
+
482
+ # initialize Q-D table
483
+ e[:, 0] = 0.0 + 0j
484
+ q[0, 0] = fp[1]/(fp[0]/2)
485
+ for i in range(1, 2*M):
486
+ q[i, 0] = fp[i+1]/fp[i]
487
+
488
+ # rhombus rule for filling triangular Q-D table (e & q)
489
+ for r in range(1, M+1):
490
+ # start with e, column 1, 0:2*M-2
491
+ mr = 2*(M-r) + 1
492
+ e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1]
493
+ if not r == M:
494
+ rq = r+1
495
+ mr = 2*(M-rq)+1 + 2
496
+ for i in range(mr):
497
+ q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1]
498
+
499
+ # build up continued fraction coefficients (d)
500
+ d[0] = fp[0]/2
501
+ for r in range(1, M+1):
502
+ d[2*r-1] = -q[0, r-1] # even terms
503
+ d[2*r] = -e[0, r] # odd terms
504
+
505
+ # seed A and B for recurrence
506
+ A[0] = 0.0 + 0.0j
507
+ A[1] = d[0]
508
+ B[0:2] = 1.0 + 0.0j
509
+
510
+ # base of the power series
511
+ z = self.ctx.expjpi(self.t/T) # i*pi is already in fcn
512
+
513
+ # coefficients of Pade approximation (A & B)
514
+ # using recurrence for all but last term
515
+ for i in range(1, 2*M):
516
+ A[i+1] = A[i] + d[i]*A[i-1]*z
517
+ B[i+1] = B[i] + d[i]*B[i-1]*z
518
+
519
+ # "improved remainder" to continued fraction
520
+ brem = (1 + (d[2*M-1] - d[2*M])*z)/2
521
+ # powm1(x,y) computes x^y - 1 more accurately near zero
522
+ rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem,
523
+ self.ctx.fraction(1, 2))
524
+
525
+ # last term of recurrence using new remainder
526
+ A[np] = A[2*M] + rem*A[2*M-1]
527
+ B[np] = B[2*M] + rem*B[2*M-1]
528
+
529
+ # diagonal Pade approximation
530
+ # F=A/B represents accelerated trapezoid rule
531
+ result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real
532
+
533
+ # setting dps back to value when calc_laplace_parameter was called
534
+ if not manual_prec:
535
+ self.ctx.dps = self.dps_orig
536
+
537
+ return result
538
+
539
+
540
+ # ****************************************
541
+
542
+ class Cohen(InverseLaplaceTransform):
543
+
544
+ def calc_laplace_parameter(self, t, **kwargs):
545
+ r"""The Cohen algorithm accelerates the convergence of the nearly
546
+ alternating series resulting from the application of the trapezoidal
547
+ rule to the Bromwich contour inversion integral.
548
+
549
+ .. math ::
550
+
551
+ p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M
552
+
553
+ where
554
+
555
+ .. math ::
556
+
557
+ \gamma = \frac{2}{3} (d + \log(10) + \log(2 t)),
558
+
559
+ `d = \mathrm{dps\_goal}`, which is chosen based on the desired
560
+ accuracy using the method developed in [1] to improve numerical
561
+ stability. The Cohen algorithm shows robustness similar to the de Hoog
562
+ et al. algorithm, but it is faster than the fixed Talbot algorithm.
563
+
564
+ **Optional arguments**
565
+
566
+ *degree*
567
+ integer order of the approximation (M = number of terms)
568
+ *alpha*
569
+ abscissa for `p_0` (controls the discretization error)
570
+
571
+ The working precision will be increased according to a rule of
572
+ thumb. If 'degree' is not specified, the working precision and
573
+ degree are chosen to hopefully achieve the dps of the calling
574
+ context. If 'degree' is specified, the working precision is
575
+ chosen to achieve maximum resulting precision for the
576
+ specified degree.
577
+
578
+ **References**
579
+
580
+ 1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss
581
+ distribution in the Gaussian copula model: a comparison of methods.
582
+ *Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057
583
+
584
+ """
585
+ self.t = self.ctx.convert(t)
586
+
587
+ if 'degree' in kwargs:
588
+ self.degree = kwargs['degree']
589
+ self.dps_goal = int(1.5 * self.degree)
590
+ else:
591
+ self.dps_goal = int(self.ctx.dps * 1.74)
592
+ self.degree = max(22, int(1.31 * self.dps_goal))
593
+
594
+ M = self.degree + 1
595
+
596
+ # this is adjusting the dps of the calling context hopefully
597
+ # the caller doesn't monkey around with it between calling
598
+ # this routine and calc_time_domain_solution()
599
+ self.dps_orig = self.ctx.dps
600
+ self.ctx.dps = self.dps_goal
601
+
602
+ ttwo = 2 * self.t
603
+ tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo)
604
+ tmp = self.ctx.fraction(2, 3) * tmp
605
+ self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))
606
+
607
+ # all but time-dependent part of p
608
+ a_t = self.alpha / ttwo
609
+ p_t = self.ctx.pi * 1j / self.t
610
+
611
+ self.p = self.ctx.matrix(M, 1)
612
+ self.p[0] = a_t
613
+
614
+ for i in range(1, M):
615
+ self.p[i] = a_t + i * p_t
616
+
617
+ def calc_time_domain_solution(self, fp, t, manual_prec=False):
618
+ r"""Calculate time-domain solution for Cohen algorithm.
619
+
620
+ The accelerated nearly alternating series is:
621
+
622
+ .. math ::
623
+
624
+ f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2}
625
+ \Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) -
626
+ \sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f}
627
+ \left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right],
628
+
629
+ where coefficients `\frac{c_{M, k}}{d_M}` are described in [1].
630
+
631
+ 1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence
632
+ acceleration of alternating series. *Experiment. Math* 9(1):3-12
633
+
634
+ """
635
+ self.t = self.ctx.convert(t)
636
+
637
+ n = self.degree
638
+ M = n + 1
639
+
640
+ A = self.ctx.matrix(M, 1)
641
+ for i in range(M):
642
+ A[i] = fp[i].real
643
+
644
+ d = (3 + self.ctx.sqrt(8)) ** n
645
+ d = (d + 1 / d) / 2
646
+ b = -self.ctx.one
647
+ c = -d
648
+ s = 0
649
+
650
+ for k in range(n):
651
+ c = b - c
652
+ s = s + c * A[k + 1]
653
+ b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))
654
+
655
+ result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d)
656
+
657
+ # setting dps back to value when calc_laplace_parameter was
658
+ # called, unless flag is set.
659
+ if not manual_prec:
660
+ self.ctx.dps = self.dps_orig
661
+
662
+ return result
663
+
664
+
665
+ # ****************************************
666
+
667
+ class LaplaceTransformInversionMethods(object):
668
+ def __init__(ctx, *args, **kwargs):
669
+ ctx._fixed_talbot = FixedTalbot(ctx)
670
+ ctx._stehfest = Stehfest(ctx)
671
+ ctx._de_hoog = deHoog(ctx)
672
+ ctx._cohen = Cohen(ctx)
673
+
674
+ def invertlaplace(ctx, f, t, **kwargs):
675
+ r"""Computes the numerical inverse Laplace transform for a
676
+ Laplace-space function at a given time. The function being
677
+ evaluated is assumed to be a real-valued function of time.
678
+
679
+ The user must supply a Laplace-space function `\bar{f}(p)`,
680
+ and a desired time at which to estimate the time-domain
681
+ solution `f(t)`.
682
+
683
+ A few basic examples of Laplace-space functions with known
684
+ inverses (see references [1,2]) :
685
+
686
+ .. math ::
687
+
688
+ \mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p)
689
+
690
+ .. math ::
691
+
692
+ \mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t)
693
+
694
+ .. math ::
695
+
696
+ \bar{f}(p) = \frac{1}{(p+1)^2}
697
+
698
+ .. math ::
699
+
700
+ f(t) = t e^{-t}
701
+
702
+ >>> from mpmath import *
703
+ >>> mp.dps = 15; mp.pretty = True
704
+ >>> tt = [0.001, 0.01, 0.1, 1, 10]
705
+ >>> fp = lambda p: 1/(p+1)**2
706
+ >>> ft = lambda t: t*exp(-t)
707
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot')
708
+ (0.000999000499833375, 8.57923043561212e-20)
709
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot')
710
+ (0.00990049833749168, 3.27007646698047e-19)
711
+ >>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot')
712
+ (0.090483741803596, -1.75215800052168e-18)
713
+ >>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot')
714
+ (0.367879441171442, 1.2428864009344e-17)
715
+ >>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot')
716
+ (0.000453999297624849, 4.04513489306658e-20)
717
+
718
+ The methods also work for higher precision:
719
+
720
+ >>> mp.dps = 100; mp.pretty = True
721
+ >>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15)
722
+ ('0.000999000499833375', '-4.96868310693356e-105')
723
+ >>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15)
724
+ ('0.00990049833749168', '1.23032291513122e-104')
725
+
726
+ .. math ::
727
+
728
+ \bar{f}(p) = \frac{1}{p^2+1}
729
+
730
+ .. math ::
731
+
732
+ f(t) = \mathrm{J}_0(t)
733
+
734
+ >>> mp.dps = 15; mp.pretty = True
735
+ >>> fp = lambda p: 1/sqrt(p*p + 1)
736
+ >>> ft = lambda t: besselj(0,t)
737
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog')
738
+ (0.999999750000016, -6.09717765032273e-18)
739
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog')
740
+ (0.99997500015625, -5.61756281076169e-17)
741
+
742
+ .. math ::
743
+
744
+ \bar{f}(p) = \frac{\log p}{p}
745
+
746
+ .. math ::
747
+
748
+ f(t) = -\gamma -\log t
749
+
750
+ >>> mp.dps = 15; mp.pretty = True
751
+ >>> fp = lambda p: log(p)/p
752
+ >>> ft = lambda t: -euler-log(t)
753
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest')
754
+ (6.3305396140806, -1.92126634837863e-16)
755
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest')
756
+ (4.02795452108656, -4.81486093200704e-16)
757
+
758
+ **Options**
759
+
760
+ :func:`~mpmath.invertlaplace` recognizes the following optional
761
+ keywords valid for all methods:
762
+
763
+ *method*
764
+ Chooses numerical inverse Laplace transform algorithm
765
+ (described below).
766
+ *degree*
767
+ Number of terms used in the approximation
768
+
769
+ **Algorithms**
770
+
771
+ Mpmath implements four numerical inverse Laplace transform
772
+ algorithms, attributed to: Talbot, Stehfest, and de Hoog,
773
+ Knight and Stokes. These can be selected by using
774
+ *method='talbot'*, *method='stehfest'*, *method='dehoog'* or
775
+ *method='cohen'* or by passing the classes *method=FixedTalbot*,
776
+ *method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions
777
+ :func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`,
778
+ :func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen`
779
+ are also available as shortcuts.
780
+
781
+ All four algorithms implement a heuristic balance between the
782
+ requested precision and the precision used internally for the
783
+ calculations. This has been tuned for a typical exponentially
784
+ decaying function and precision up to few hundred decimal
785
+ digits.
786
+
787
+ The Laplace transform converts the variable time (i.e., along
788
+ a line) into a parameter given by the right half of the
789
+ complex `p`-plane. Singularities, poles, and branch cuts in
790
+ the complex `p`-plane contain all the information regarding
791
+ the time behavior of the corresponding function. Any numerical
792
+ method must therefore sample `p`-plane "close enough" to the
793
+ singularities to accurately characterize them, while not
794
+ getting too close to have catastrophic cancellation, overflow,
795
+ or underflow issues. Most significantly, if one or more of the
796
+ singularities in the `p`-plane is not on the left side of the
797
+ Bromwich contour, its effects will be left out of the computed
798
+ solution, and the answer will be completely wrong.
799
+
800
+ *Talbot*
801
+
802
+ The fixed Talbot method is high accuracy and fast, but the
803
+ method can catastrophically fail for certain classes of time-domain
804
+ behavior, including a Heaviside step function for positive
805
+ time (e.g., `H(t-2)`), or some oscillatory behaviors. The
806
+ Talbot method usually has adjustable parameters, but the
807
+ "fixed" variety implemented here does not. This method
808
+ deforms the Bromwich integral contour in the shape of a
809
+ parabola towards `-\infty`, which leads to problems
810
+ when the solution has a decaying exponential in it (e.g., a
811
+ Heaviside step function is equivalent to multiplying by a
812
+ decaying exponential in Laplace space).
813
+
814
+ *Stehfest*
815
+
816
+ The Stehfest algorithm only uses abscissa along the real axis
817
+ of the complex `p`-plane to estimate the time-domain
818
+ function. Oscillatory time-domain functions have poles away
819
+ from the real axis, so this method does not work well with
820
+ oscillatory functions, especially high-frequency ones. This
821
+ method also depends on summation of terms in a series that
822
+ grows very large, and will have catastrophic cancellation
823
+ during summation if the working precision is too low.
824
+
825
+ *de Hoog et al.*
826
+
827
+ The de Hoog, Knight, and Stokes method is essentially a
828
+ Fourier-series quadrature-type approximation to the Bromwich
829
+ contour integral, with non-linear series acceleration and an
830
+ analytical expression for the remainder term. This method is
831
+ typically one of the most robust. This method also involves the
832
+ greatest amount of overhead, so it is typically the slowest of the
833
+ four methods at high precision.
834
+
835
+ *Cohen*
836
+
837
+ The Cohen method is a trapezoidal rule approximation to the Bromwich
838
+ contour integral, with linear acceleration for alternating
839
+ series. This method is as robust as the de Hoog et al method and the
840
+ fastest of the four methods at high precision, and is therefore the
841
+ default method.
842
+
843
+ **Singularities**
844
+
845
+ All numerical inverse Laplace transform methods have problems
846
+ at large time when the Laplace-space function has poles,
847
+ singularities, or branch cuts to the right of the origin in
848
+ the complex plane. For simple poles in `\bar{f}(p)` at the
849
+ `p`-plane origin, the time function is constant in time (e.g.,
850
+ `\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at
851
+ `p=0`). A pole in `\bar{f}(p)` to the left of the origin is a
852
+ decreasing function of time (e.g., `\mathcal{L}\left\lbrace
853
+ e^{-t/2} \right\rbrace=1/(p+1/2)` has a pole at `p=-1/2`), and
854
+ a pole to the right of the origin leads to an increasing
855
+ function in time (e.g., `\mathcal{L}\left\lbrace t e^{t/4}
856
+ \right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When
857
+ singularities occur off the real `p` axis, the time-domain
858
+ function is oscillatory. For example `\mathcal{L}\left\lbrace
859
+ \mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut
860
+ starting at `p=j=\sqrt{-1}` and is a decaying oscillatory
861
+ function, This range of behaviors is illustrated in Duffy [3]
862
+ Figure 4.10.4, p. 228.
863
+
864
+ In general as `p \rightarrow \infty` `t \rightarrow 0` and
865
+ vice-versa. All numerical inverse Laplace transform methods
866
+ require their abscissa to shift closer to the origin for
867
+ larger times. If the abscissa shift left of the rightmost
868
+ singularity in the Laplace domain, the answer will be
869
+ completely wrong (the effect of singularities to the right of
870
+ the Bromwich contour are not included in the results).
871
+
872
+ For example, the following exponentially growing function has
873
+ a pole at `p=3`:
874
+
875
+ .. math ::
876
+
877
+ \bar{f}(p)=\frac{1}{p^2-9}
878
+
879
+ .. math ::
880
+
881
+ f(t)=\frac{1}{3}\sinh 3t
882
+
883
+ >>> mp.dps = 15; mp.pretty = True
884
+ >>> fp = lambda p: 1/(p*p-9)
885
+ >>> ft = lambda t: sinh(3*t)/3
886
+ >>> tt = [0.01,0.1,1.0,10.0]
887
+ >>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot')
888
+ (0.0100015000675014, 0.0100015000675014)
889
+ >>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot')
890
+ (0.101506764482381, 0.101506764482381)
891
+ >>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot')
892
+ (3.33929164246997, 3.33929164246997)
893
+ >>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot')
894
+ (1781079096920.74, -1.61331069624091e-14)
895
+
896
+ **References**
897
+
898
+ 1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4)
899
+ 2. Cohen, A.M. (2007). Numerical Methods for Laplace Transform
900
+ Inversion, Springer.
901
+ 3. Duffy, D.G. (1998). Advanced Engineering Mathematics, CRC Press.
902
+
903
+ **Numerical Inverse Laplace Transform Reviews**
904
+
905
+ 1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical
906
+ inversion of the Laplace transform: Applications to Biology,
907
+ Economics, Engineering, and Physics*. Elsevier.
908
+ 2. Davies, B., B. Martin (1979). Numerical inversion of the
909
+ Laplace transform: a survey and comparison of methods. *Journal
910
+ of Computational Physics* 33:1-32,
911
+ http://dx.doi.org/10.1016/0021-9991(79)90025-1
912
+ 3. Duffy, D.G. (1993). On the numerical inversion of Laplace
913
+ transforms: Comparison of three new methods on characteristic
914
+ problems from applications. *ACM Transactions on Mathematical
915
+ Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788
916
+ 4. Kuhlman, K.L., (2013). Review of Inverse Laplace Transform
917
+ Algorithms for Laplace-Space Numerical Approaches, *Numerical
918
+ Algorithms*, 63(2):339-355.
919
+ http://dx.doi.org/10.1007/s11075-012-9625-3
920
+
921
+ """
922
+
923
+ rule = kwargs.get('method', 'cohen')
924
+ if type(rule) is str:
925
+ lrule = rule.lower()
926
+ if lrule == 'talbot':
927
+ rule = ctx._fixed_talbot
928
+ elif lrule == 'stehfest':
929
+ rule = ctx._stehfest
930
+ elif lrule == 'dehoog':
931
+ rule = ctx._de_hoog
932
+ elif rule == 'cohen':
933
+ rule = ctx._cohen
934
+ else:
935
+ raise ValueError("unknown invlap algorithm: %s" % rule)
936
+ else:
937
+ rule = rule(ctx)
938
+
939
+ # determine the vector of Laplace-space parameter
940
+ # needed for the requested method and desired time
941
+ rule.calc_laplace_parameter(t, **kwargs)
942
+
943
+ # compute the Laplace-space function evalutations
944
+ # at the required abscissa.
945
+ fp = [f(p) for p in rule.p]
946
+
947
+ # compute the time-domain solution from the
948
+ # Laplace-space function evaluations
949
+ return rule.calc_time_domain_solution(fp, t)
950
+
951
+ # shortcuts for the above function for specific methods
952
+ def invlaptalbot(ctx, *args, **kwargs):
953
+ kwargs['method'] = 'talbot'
954
+ return ctx.invertlaplace(*args, **kwargs)
955
+
956
+ def invlapstehfest(ctx, *args, **kwargs):
957
+ kwargs['method'] = 'stehfest'
958
+ return ctx.invertlaplace(*args, **kwargs)
959
+
960
+ def invlapdehoog(ctx, *args, **kwargs):
961
+ kwargs['method'] = 'dehoog'
962
+ return ctx.invertlaplace(*args, **kwargs)
963
+
964
+ def invlapcohen(ctx, *args, **kwargs):
965
+ kwargs['method'] = 'cohen'
966
+ return ctx.invertlaplace(*args, **kwargs)
967
+
968
+
969
+ # ****************************************
970
+
971
+ if __name__ == '__main__':
972
+ import doctest
973
+ doctest.testmod()
lib/python3.11/site-packages/mpmath/calculus/odes.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from bisect import bisect
2
+ from ..libmp.backend import xrange
3
+
4
+ class ODEMethods(object):
5
+ pass
6
+
7
+ def ode_taylor(ctx, derivs, x0, y0, tol_prec, n):
8
+ h = tol = ctx.ldexp(1, -tol_prec)
9
+ dim = len(y0)
10
+ xs = [x0]
11
+ ys = [y0]
12
+ x = x0
13
+ y = y0
14
+ orig = ctx.prec
15
+ try:
16
+ ctx.prec = orig*(1+n)
17
+ # Use n steps with Euler's method to get
18
+ # evaluation points for derivatives
19
+ for i in range(n):
20
+ fxy = derivs(x, y)
21
+ y = [y[i]+h*fxy[i] for i in xrange(len(y))]
22
+ x += h
23
+ xs.append(x)
24
+ ys.append(y)
25
+ # Compute derivatives
26
+ ser = [[] for d in range(dim)]
27
+ for j in range(n+1):
28
+ s = [0]*dim
29
+ b = (-1) ** (j & 1)
30
+ k = 1
31
+ for i in range(j+1):
32
+ for d in range(dim):
33
+ s[d] += b * ys[i][d]
34
+ b = (b * (j-k+1)) // (-k)
35
+ k += 1
36
+ scale = h**(-j) / ctx.fac(j)
37
+ for d in range(dim):
38
+ s[d] = s[d] * scale
39
+ ser[d].append(s[d])
40
+ finally:
41
+ ctx.prec = orig
42
+ # Estimate radius for which we can get full accuracy.
43
+ # XXX: do this right for zeros
44
+ radius = ctx.one
45
+ for ts in ser:
46
+ if ts[-1]:
47
+ radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n))
48
+ radius /= 2 # XXX
49
+ return ser, x0+radius
50
+
51
+ def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
52
+ r"""
53
+ Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
54
+ that is a numerical solution of the `n+1`-dimensional first-order
55
+ ordinary differential equation (ODE) system
56
+
57
+ .. math ::
58
+
59
+ y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])
60
+
61
+ y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])
62
+
63
+ \vdots
64
+
65
+ y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])
66
+
67
+ The derivatives are specified by the vector-valued function
68
+ *F* that evaluates
69
+ `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
70
+ The initial point `x_0` is specified by the scalar argument *x0*,
71
+ and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is
72
+ specified by the vector argument *y0*.
73
+
74
+ For convenience, if the system is one-dimensional, you may optionally
75
+ provide just a scalar value for *y0*. In this case, *F* should accept
76
+ a scalar *y* argument and return a scalar. The solution function
77
+ *y* will return scalar values instead of length-1 vectors.
78
+
79
+ Evaluation of the solution function `y(x)` is permitted
80
+ for any `x \ge x_0`.
81
+
82
+ A high-order ODE can be solved by transforming it into first-order
83
+ vector form. This transformation is described in standard texts
84
+ on ODEs. Examples will also be given below.
85
+
86
+ **Options, speed and accuracy**
87
+
88
+ By default, :func:`~mpmath.odefun` uses a high-order Taylor series
89
+ method. For reasonably well-behaved problems, the solution will
90
+ be fully accurate to within the working precision. Note that
91
+ *F* must be possible to evaluate to very high precision
92
+ for the generation of Taylor series to work.
93
+
94
+ To get a faster but less accurate solution, you can set a large
95
+ value for *tol* (which defaults roughly to *eps*). If you just
96
+ want to plot the solution or perform a basic simulation,
97
+ *tol = 0.01* is likely sufficient.
98
+
99
+ The *degree* argument controls the degree of the solver (with
100
+ *method='taylor'*, this is the degree of the Taylor series
101
+ expansion). A higher degree means that a longer step can be taken
102
+ before a new local solution must be generated from *F*,
103
+ meaning that fewer steps are required to get from `x_0` to a given
104
+ `x_1`. On the other hand, a higher degree also means that each
105
+ local solution becomes more expensive (i.e., more evaluations of
106
+ *F* are required per step, and at higher precision).
107
+
108
+ The optimal setting therefore involves a tradeoff. Generally,
109
+ decreasing the *degree* for Taylor series is likely to give faster
110
+ solution at low precision, while increasing is likely to be better
111
+ at higher precision.
112
+
113
+ The function
114
+ object returned by :func:`~mpmath.odefun` caches the solutions at all step
115
+ points and uses polynomial interpolation between step points.
116
+ Therefore, once `y(x_1)` has been evaluated for some `x_1`,
117
+ `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`.
118
+ and continuing the evaluation up to `x_2 > x_1` is also fast.
119
+
120
+ **Examples of first-order ODEs**
121
+
122
+ We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
123
+ which has explicit solution `y(x) = \exp(x)`::
124
+
125
+ >>> from mpmath import *
126
+ >>> mp.dps = 15; mp.pretty = True
127
+ >>> f = odefun(lambda x, y: y, 0, 1)
128
+ >>> for x in [0, 1, 2.5]:
129
+ ... print((f(x), exp(x)))
130
+ ...
131
+ (1.0, 1.0)
132
+ (2.71828182845905, 2.71828182845905)
133
+ (12.1824939607035, 12.1824939607035)
134
+
135
+ The solution with high precision::
136
+
137
+ >>> mp.dps = 50
138
+ >>> f = odefun(lambda x, y: y, 0, 1)
139
+ >>> f(1)
140
+ 2.7182818284590452353602874713526624977572470937
141
+ >>> exp(1)
142
+ 2.7182818284590452353602874713526624977572470937
143
+
144
+ Using the more general vectorized form, the test problem
145
+ can be input as (note that *f* returns a 1-element vector)::
146
+
147
+ >>> mp.dps = 15
148
+ >>> f = odefun(lambda x, y: [y[0]], 0, [1])
149
+ >>> f(1)
150
+ [2.71828182845905]
151
+
152
+ :func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally
153
+ impossible (and at best difficult) to solve analytically. As
154
+ an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
155
+ for `y(0) = \pi/2`. An exact solution happens to be known
156
+ for this problem, and is given by
157
+ `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::
158
+
159
+ >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
160
+ >>> for x in [2, 5, 10]:
161
+ ... print((f(x), 2*atan(exp(mpf(x)**2/2))))
162
+ ...
163
+ (2.87255666284091, 2.87255666284091)
164
+ (3.14158520028345, 3.14158520028345)
165
+ (3.14159265358979, 3.14159265358979)
166
+
167
+ If `F` is independent of `y`, an ODE can be solved using direct
168
+ integration. We can therefore obtain a reference solution with
169
+ :func:`~mpmath.quad`::
170
+
171
+ >>> f = lambda x: (1+x**2)/(1+x**3)
172
+ >>> g = odefun(lambda x, y: f(x), pi, 0)
173
+ >>> g(2*pi)
174
+ 0.72128263801696
175
+ >>> quad(f, [pi, 2*pi])
176
+ 0.72128263801696
177
+
178
+ **Examples of second-order ODEs**
179
+
180
+ We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
181
+ To do this, we introduce the helper functions `y_0 = y, y_1 = y_0'`
182
+ whereby the original equation can be written as `y_1' + y_0' = 0`. Put
183
+ together, we get the first-order, two-dimensional vector ODE
184
+
185
+ .. math ::
186
+
187
+ \begin{cases}
188
+ y_0' = y_1 \\
189
+ y_1' = -y_0
190
+ \end{cases}
191
+
192
+ To get a well-defined IVP, we need two initial values. With
193
+ `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
194
+ course be solved by `y(x) = y_0(x) = \cos(x)` and
195
+ `-y'(x) = y_1(x) = \sin(x)`. We check this::
196
+
197
+ >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
198
+ >>> for x in [0, 1, 2.5, 10]:
199
+ ... nprint(f(x), 15)
200
+ ... nprint([cos(x), sin(x)], 15)
201
+ ... print("---")
202
+ ...
203
+ [1.0, 0.0]
204
+ [1.0, 0.0]
205
+ ---
206
+ [0.54030230586814, 0.841470984807897]
207
+ [0.54030230586814, 0.841470984807897]
208
+ ---
209
+ [-0.801143615546934, 0.598472144103957]
210
+ [-0.801143615546934, 0.598472144103957]
211
+ ---
212
+ [-0.839071529076452, -0.54402111088937]
213
+ [-0.839071529076452, -0.54402111088937]
214
+ ---
215
+
216
+ Note that we get both the sine and the cosine solutions
217
+ simultaneously.
218
+
219
+ **TODO**
220
+
221
+ * Better automatic choice of degree and step size
222
+ * Make determination of Taylor series convergence radius
223
+ more robust
224
+ * Allow solution for `x < x_0`
225
+ * Allow solution for complex `x`
226
+ * Test for difficult (ill-conditioned) problems
227
+ * Implement Runge-Kutta and other algorithms
228
+
229
+ """
230
+ if tol:
231
+ tol_prec = int(-ctx.log(tol, 2))+10
232
+ else:
233
+ tol_prec = ctx.prec+10
234
+ degree = degree or (3 + int(3*ctx.dps/2.))
235
+ workprec = ctx.prec + 40
236
+ try:
237
+ len(y0)
238
+ return_vector = True
239
+ except TypeError:
240
+ F_ = F
241
+ F = lambda x, y: [F_(x, y[0])]
242
+ y0 = [y0]
243
+ return_vector = False
244
+ ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
245
+ series_boundaries = [x0, xb]
246
+ series_data = [(ser, x0, xb)]
247
+ # We will be working with vectors of Taylor series
248
+ def mpolyval(ser, a):
249
+ return [ctx.polyval(s[::-1], a) for s in ser]
250
+ # Find nearest expansion point; compute if necessary
251
+ def get_series(x):
252
+ if x < x0:
253
+ raise ValueError
254
+ n = bisect(series_boundaries, x)
255
+ if n < len(series_boundaries):
256
+ return series_data[n-1]
257
+ while 1:
258
+ ser, xa, xb = series_data[-1]
259
+ if verbose:
260
+ print("Computing Taylor series for [%f, %f]" % (xa, xb))
261
+ y = mpolyval(ser, xb-xa)
262
+ xa = xb
263
+ ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
264
+ series_boundaries.append(xb)
265
+ series_data.append((ser, xa, xb))
266
+ if x <= xb:
267
+ return series_data[-1]
268
+ # Evaluation function
269
+ def interpolant(x):
270
+ x = ctx.convert(x)
271
+ orig = ctx.prec
272
+ try:
273
+ ctx.prec = workprec
274
+ ser, xa, xb = get_series(x)
275
+ y = mpolyval(ser, x-xa)
276
+ finally:
277
+ ctx.prec = orig
278
+ if return_vector:
279
+ return [+yk for yk in y]
280
+ else:
281
+ return +y[0]
282
+ return interpolant
283
+
284
+ ODEMethods.odefun = odefun
285
+
286
+ if __name__ == "__main__":
287
+ import doctest
288
+ doctest.testmod()
lib/python3.11/site-packages/mpmath/calculus/optimization.py ADDED
@@ -0,0 +1,1102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import print_function
2
+
3
+ from copy import copy
4
+
5
+ from ..libmp.backend import xrange
6
+
7
class OptimizationMethods(object):
    # Mixin namespace: the solver entry points (findroot, multiplicity,
    # jacobian) are attached to this class at the bottom of the module and
    # inherited by the mpmath context classes.
    def __init__(ctx):
        # NOTE: 'ctx' plays the role of 'self' here (mpmath convention).
        pass
10
+
11
+ ##############
12
+ # 1D-SOLVERS #
13
+ ##############
14
+
15
class Newton:
    """
    1d-solver generating pairs of approximative root and error.

    Needs a starting point x0 close to the root.

    Pro:

    * converges fast
    * sometimes more robust than secant with bad second starting point

    Contra:

    * converges slowly for multiple roots
    * needs first derivative
    * 2 function evaluations per iteration
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        # exactly one starting point is accepted
        if len(x0) != 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if 'df' in kwargs:
            df = kwargs['df']
        else:
            # fall back to a numerical derivative
            def df(t):
                return self.ctx.diff(f, t)
        self.df = df

    def __iter__(self):
        f, df = self.f, self.df
        x = self.x0
        while True:
            # classic Newton step: x_{k+1} = x_k - f(x_k)/f'(x_k)
            nxt = x - f(x) / df(x)
            step = abs(nxt - x)
            x = nxt
            yield (x, step)
57
+
58
class Secant:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0 and x1 close to the root.
    x1 defaults to x0 + 0.25.

    Pro:

    * converges fast

    Contra:

    * converges slowly for multiple roots
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) == 1:
            self.x0 = x0[0]
            self.x1 = self.x0 + 0.25
        elif len(x0) == 2:
            self.x0 = x0[0]
            self.x1 = x0[1]
        else:
            raise ValueError('expected 1 or 2 starting points, got %i' % len(x0))
        self.f = f

    def __iter__(self):
        f = self.f
        prev, cur = self.x0, self.x1
        fprev = f(prev)
        while True:
            fcur = f(cur)
            step = cur - prev
            # stop when the two iterates coincide (no progress possible)
            if not step:
                break
            slope = (fcur - fprev) / step
            # a zero secant slope would divide by zero below
            if not slope:
                break
            prev, cur = cur, cur - fcur / slope
            fprev = fcur
            yield cur, abs(step)
103
+
104
class MNewton:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting point x0 close to the root.
    Uses modified Newton's method that converges fast regardless of the
    multiplicity of the root.

    Pro:

    * converges fast for multiple roots

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if not 'df' in kwargs:
            # numerical first derivative fallback
            def df(x):
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        if not 'd2f' in kwargs:
            # numerical second derivative fallback
            def d2f(x):
                return self.ctx.diff(df, x)
        else:
            # BUG FIX: previously read kwargs['df'] here, silently using the
            # first derivative as the second whenever d2f was supplied.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            if fx == 0:
                break
            dfx = df(x)
            d2fx = d2f(x)
            # x = x - F(x)/F'(x) with F(x) = f(x)/f'(x); F has only simple
            # roots, which restores quadratic convergence at multiple roots.
            x -= fx / (dfx - fx * d2fx / dfx)
            error = abs(x - prevx)
            yield x, error
158
+
159
class Halley:
    """
    1d-solver generating pairs of approximative root and error.

    Needs a starting point x0 close to the root.
    Uses Halley's method with cubic convergence rate.

    Pro:

    * converges even faster than Newton's method
    * useful when computing with *many* digits

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    * converges slowly for multiple roots
    """

    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if not 'df' in kwargs:
            # numerical first derivative fallback
            def df(x):
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        if not 'd2f' in kwargs:
            # numerical second derivative fallback
            def d2f(x):
                return self.ctx.diff(df, x)
        else:
            # BUG FIX: previously read kwargs['df'] here, silently using the
            # first derivative as the second whenever d2f was supplied.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            dfx = df(x)
            d2fx = d2f(x)
            # Halley step: x -= 2 f f' / (2 f'^2 - f f'')
            x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
            error = abs(x - prevx)
            yield x, error
212
+
213
class Muller:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0, x1 and x2 close to the root.
    x1 defaults to x0 + 0.25; x2 to x1 + 0.25.
    Uses Muller's method that converges towards complex roots.

    Pro:

    * converges fast (somewhat faster than secant)
    * can find complex roots

    Contra:

    * converges slowly for multiple roots
    * may have complex values for real starting points and real roots

    http://en.wikipedia.org/wiki/Muller's_method
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        npoints = len(x0)
        if npoints not in (1, 2, 3):
            raise ValueError('expected 1, 2 or 3 starting points, got %i'
                             % len(x0))
        # missing starting points are spaced 0.25 apart
        self.x0 = x0[0]
        self.x1 = x0[1] if npoints > 1 else self.x0 + 0.25
        self.x2 = x0[2] if npoints > 2 else self.x1 + 0.25
        self.f = f
        self.verbose = kwargs['verbose']

    def __iter__(self):
        f = self.f
        p0, p1, p2 = self.x0, self.x1, self.x2
        f0, f1, f2 = f(p0), f(p1), f(p2)
        while True:
            # TODO: maybe refactoring with function for divided differences
            # first and second divided differences of f
            d21 = (f1 - f2) / (p1 - p2)
            d20 = (f0 - f2) / (p0 - p2)
            d10 = (f0 - f1) / (p0 - p1)
            w = d21 + d20 - d10
            d210 = (d10 - d21) / (p0 - p2)
            if w == 0 and d210 == 0:
                if self.verbose:
                    print('canceled with')
                    print('x0 =', p0, ', x1 =', p1, 'and x2 =', p2)
                break
            p0, f0 = p1, f1
            p1, f1 = p2, f2
            # denominator should be as large as possible => choose sign
            r = self.ctx.sqrt(w**2 - 4*f2*d210)
            if abs(w - r) > abs(w + r):
                r = -r
            p2 = p2 - 2*f2 / (w + r)
            f2 = f(p2)
            yield p2, abs(p2 - p1)
288
+
289
# TODO: consider raising a ValueError when there's no sign change in a and b
class Bisection:
    """
    1d-solver generating pairs of approximative root and error.

    Uses bisection method to find a root of f in [a, b].
    Might fail for multiple roots (needs sign change).

    Pro:

    * robust and reliable

    Contra:

    * converges slowly
    * needs sign change
    """
    maxsteps = 100

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.f = f
        self.a = x0[0]
        self.b = x0[1]

    def __iter__(self):
        f = self.f
        lo, hi = self.a, self.b
        width = hi - lo
        fhi = f(hi)
        while True:
            # midpoint via ldexp(., -1), i.e. an exact halving
            mid = self.ctx.ldexp(lo + hi, -1)
            fmid = f(mid)
            prod = fmid * fhi
            if prod < 0:            # sign change in [mid, hi]
                lo = mid
            elif prod > 0:          # sign change in [lo, mid]
                hi = mid
                fhi = fmid
            else:                   # hit the root exactly
                yield mid, self.ctx.zero
            width /= 2
            yield (lo + hi)/2, abs(width)
335
+
336
+ def _getm(method):
337
+ """
338
+ Return a function to calculate m for Illinois-like methods.
339
+ """
340
+ if method == 'illinois':
341
+ def getm(fz, fb):
342
+ return 0.5
343
+ elif method == 'pegasus':
344
+ def getm(fz, fb):
345
+ return fb/(fb + fz)
346
+ elif method == 'anderson':
347
+ def getm(fz, fb):
348
+ m = 1 - fz/fb
349
+ if m > 0:
350
+ return m
351
+ else:
352
+ return 0.5
353
+ else:
354
+ raise ValueError("method '%s' not recognized" % method)
355
+ return getm
356
+
357
class Illinois:
    """
    1d-solver generating pairs of approximative root and error.

    Uses Illinois method or similar to find a root of f in [a, b].
    Might fail for multiple roots (needs sign change).
    Combines bisect with secant (improved regula falsi).

    The only difference between the methods is the scaling factor m, which is
    used to ensure convergence (you can choose one using the 'method' keyword):

    Illinois method ('illinois'):
        m = 0.5

    Pegasus method ('pegasus'):
        m = fb/(fb + fz)

    Anderson-Bjoerk method ('anderson'):
        m = 1 - fz/fb if positive else 0.5

    Pro:

    * converges very fast

    Contra:

    * has problems with multiple roots
    * needs sign change
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.a = x0[0]
        self.b = x0[1]
        self.f = f
        self.tol = kwargs['tol']
        self.verbose = kwargs['verbose']
        self.method = kwargs.get('method', 'illinois')
        self.getm = _getm(self.method)
        if self.verbose:
            print('using %s method' % self.method)

    def __iter__(self):
        method = self.method
        f = self.f
        lo, hi = self.a, self.b
        flo, fhi = f(lo), f(hi)
        scale = None
        while True:
            width = hi - lo
            if width == 0:
                break
            # secant (regula falsi) point inside the bracket
            slope = (fhi - flo) / width
            z = lo - flo/slope
            fz = f(z)
            if abs(fz) < self.tol:
                # TODO: better condition (when f is very flat)
                if self.verbose:
                    print('canceled with z =', z)
                yield z, width
                break
            if fz * fhi < 0:  # root in [z, hi]
                lo, flo = hi, fhi
                hi, fhi = z, fz
            else:  # root in [lo, z]
                scale = self.getm(fz, fhi)
                hi, fhi = z, fz
                flo = scale*flo  # scale down to ensure convergence
            if self.verbose and scale and not method == 'illinois':
                print('m:', scale)
            yield (lo + hi)/2, abs(width)
436
+
437
def Pegasus(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses Pegasus method to find a root of f in [a, b].
    Wrapper for illinois to use method='pegasus'.
    """
    kwargs.update(method='pegasus')
    return Illinois(*args, **kwargs)
446
+
447
def Anderson(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses Anderson-Bjoerk method to find a root of f in [a, b].
    Wrapper for illinois to use method='anderson'.
    """
    kwargs['method'] = 'anderson'
    return Illinois(*args, **kwargs)
456
+
457
# TODO: check whether it's possible to combine it with Illinois stuff
class Ridder:
    """
    1d-solver generating pairs of approximative root and error.

    Ridders' method to find a root of f in [a, b].
    Is told to perform as well as Brent's method while being simpler.

    Pro:

    * very fast
    * simpler than Brent's method

    Contra:

    * two function evaluations per step
    * has problems with multiple roots
    * needs sign change

    http://en.wikipedia.org/wiki/Ridders'_method
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.x1 = x0[0]
        self.x2 = x0[1]
        self.verbose = kwargs['verbose']
        self.tol = kwargs['tol']

    def __iter__(self):
        ctx = self.ctx
        f = self.f
        a, fa = self.x1, f(self.x1)
        b, fb = self.x2, f(self.x2)
        while True:
            mid = 0.5*(a + b)
            fmid = f(mid)
            # exponential-interpolation step through (a, mid, b)
            cand = mid + (mid - a) * ctx.sign(fa - fb) * fmid / ctx.sqrt(fmid**2 - fa*fb)
            fcand = f(cand)
            if abs(fcand) < self.tol:
                # TODO: better condition (when f is very flat)
                if self.verbose:
                    print('canceled with f(x4) =', fcand)
                yield cand, abs(a - b)
                break
            if fcand * fb < 0:  # root in [x4, x2]
                a, fa = cand, fcand
            else:  # root in [x1, x4]
                b, fb = cand, fcand
            yield (a + b)/2, abs(a - b)
516
+
517
class ANewton:
    """
    EXPERIMENTAL 1d-solver generating pairs of approximative root and error.

    Uses Newton's method modified to use Steffensens method when convergence is
    slow. (I.e. for multiple roots.)
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if 'df' in kwargs:
            df = kwargs['df']
        else:
            # numerical derivative fallback
            def df(x):
                return self.ctx.diff(f, x)
        self.df = df
        # fixpoint map of Newton's method
        def phi(x):
            return x - f(x) / df(x)
        self.phi = phi
        self.verbose = kwargs['verbose']

    def __iter__(self):
        x = self.x0
        phi = self.phi
        error = 0
        slow_count = 0
        while True:
            previous = x
            try:
                x = phi(x)
            except ZeroDivisionError:
                if self.verbose:
                    print('ZeroDivisionError: canceled with x =', x)
                break
            preverror = error
            error = abs(previous - x)
            # TODO: decide not to use convergence acceleration
            if error and abs(error - preverror) / error < 1:
                if self.verbose:
                    print('converging slowly')
                slow_count += 1
            if slow_count >= 3:
                # accelerate convergence
                phi = steffensen(phi)
                slow_count = 0
                if self.verbose:
                    print('accelerating convergence')
            yield x, error
572
+
573
+ # TODO: add Brent
574
+
575
+ ############################
576
+ # MULTIDIMENSIONAL SOLVERS #
577
+ ############################
578
+
579
def jacobian(ctx, f, x):
    """
    Calculate the Jacobian matrix of a function at the point x0.

    This is the first derivative of a vectorial function:

    f : R^m -> R^n with m >= n
    """
    point = ctx.matrix(x)
    # forward-difference step ~ sqrt(machine epsilon)
    step = ctx.sqrt(ctx.eps)
    fx = ctx.matrix(f(*point))
    rows = len(fx)
    cols = len(point)
    J = ctx.matrix(rows, cols)
    for col in xrange(cols):
        shifted = point.copy()
        shifted[col] += step
        # one column of the Jacobian per perturbed coordinate
        column = (ctx.matrix(f(*shifted)) - fx) / step
        for row in xrange(rows):
            J[row, col] = column[row]
    return J
600
+
601
# TODO: test with user-specified jacobian matrix
class MDNewton:
    """
    Find the root of a vector function numerically using Newton's method.

    f is a vector function representing a nonlinear equation system.

    x0 is the starting point close to the root.

    J is a function returning the Jacobian matrix for a point.

    Supports overdetermined systems.

    Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
    The function to calculate the Jacobian matrix can be given using the
    keyword 'J'. Otherwise it will be calculated numerically.

    Please note that this method converges only locally. Especially for high-
    dimensional systems it is not trivial to find a good starting point being
    close enough to the root.

    It is recommended to use a faster, low-precision solver from SciPy [1] or
    OpenOpt [2] to get an initial guess. Afterwards you can use this method for
    root-polishing to any precision.

    [1] http://scipy.org

    [2] http://openopt.org/Welcome
    """
    maxsteps = 10

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        # normalize the starting point to a column vector
        if isinstance(x0, (tuple, list)):
            x0 = ctx.matrix(x0)
        assert x0.cols == 1, 'need a vector'
        self.x0 = x0
        # user-supplied Jacobian, or numerical fallback via ctx.jacobian
        if 'J' in kwargs:
            self.J = kwargs['J']
        else:
            def J(*x):
                return ctx.jacobian(f, x)
            self.J = J
        self.norm = kwargs['norm']
        self.verbose = kwargs['verbose']

    def __iter__(self):
        f = self.f
        x0 = self.x0
        norm = self.norm
        J = self.J
        fx = self.ctx.matrix(f(*x0))
        fxnorm = norm(fx)
        cancel = False
        while not cancel:
            # get direction of descent
            fxn = -fx
            Jx = J(*x0)
            # solve J(x) s = -f(x) for the Newton step s
            s = self.ctx.lu_solve(Jx, fxn)
            if self.verbose:
                print('Jx:')
                print(Jx)
                print('s:', s)
            # damping step size TODO: better strategy (hard task)
            # halve the step until the residual norm actually decreases
            l = self.ctx.one
            x1 = x0 + s
            while True:
                if x1 == x0:
                    # step underflowed to zero: no further progress possible
                    if self.verbose:
                        print("canceled, won't get more excact")
                    cancel = True
                    break
                fx = self.ctx.matrix(f(*x1))
                newnorm = norm(fx)
                if newnorm < fxnorm:
                    # new x accepted
                    fxnorm = newnorm
                    x0 = x1
                    break
                l /= 2
                x1 = x0 + l*s
            yield (x0, fxnorm)
684
+
685
+ #############
686
+ # UTILITIES #
687
+ #############
688
+
689
# Map of user-facing solver names (accepted by findroot(..., solver=...))
# to the classes/factories implementing them.
str2solver = {'newton':Newton, 'secant':Secant, 'mnewton':MNewton,
              'halley':Halley, 'muller':Muller, 'bisect':Bisection,
              'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson,
              'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton}
693
+
694
def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs):
    r"""
    Find an approximate solution to `f(x) = 0`, using *x0* as starting point or
    interval for *x*.

    Multidimensional overdetermined systems are supported.
    You can specify them using a function or a list of functions.

    Mathematically speaking, this function returns `x` such that
    `|f(x)|^2 \leq \mathrm{tol}` is true within the current working precision.
    If the computed value does not meet this criterion, an exception is raised.
    This exception can be disabled with *verify=False*.

    For interval arithmetic (``iv.findroot()``), please note that
    the returned interval ``x`` is not guaranteed to contain `f(x)=0`!
    It is only some `x` for which `|f(x)|^2 \leq \mathrm{tol}` certainly holds
    regardless of numerical error. This may be improved in the future.

    **Arguments**

    *f*
        one dimensional function
    *x0*
        starting point, several starting points or interval (depends on solver)
    *tol*
        the returned solution has an error smaller than this
    *verbose*
        print additional information for each iteration if true
    *verify*
        verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
    *solver*
        a generator for *f* and *x0* returning approximative solution and error
    *maxsteps*
        after how many steps the solver will cancel
    *df*
        first derivative of *f* (used by some solvers)
    *d2f*
        second derivative of *f* (used by some solvers)
    *multidimensional*
        force multidimensional solving
    *J*
        Jacobian matrix of *f* (used by multidimensional solvers)
    *norm*
        used vector norm (used by multidimensional solvers)

    solver has to be callable with ``(f, x0, **kwargs)`` and return an generator
    yielding pairs of approximative solution and estimated error (which is
    expected to be positive).
    You can use the following string aliases:
    'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
    'ridder', 'anewton', 'bisect'

    See mpmath.calculus.optimization for their documentation.

    **Examples**

    The function :func:`~mpmath.findroot` locates a root of a given function using the
    secant method by default. A simple example use of the secant method is to
    compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> findroot(sin, 3)
        3.14159265358979323846264338328

    The secant method can be used to find complex roots of analytic functions,
    although it must in that case generally be given a nonreal starting value
    (or else it will never leave the real line)::

        >>> mp.dps = 15
        >>> findroot(lambda x: x**3 + 2*x + 1, j)
        (0.226698825758202 + 1.46771150871022j)

    A nice application is to compute nontrivial roots of the Riemann zeta
    function with many digits (good initial values are needed for convergence)::

        >>> mp.dps = 30
        >>> findroot(zeta, 0.5+14j)
        (0.5 + 14.1347251417346937904572519836j)

    The secant method can also be used as an optimization algorithm, by passing
    it a derivative of a function. The following example locates the positive
    minimum of the gamma function::

        >>> mp.dps = 20
        >>> findroot(lambda x: diff(gamma, x), 1)
        1.4616321449683623413

    Finally, a useful application is to compute inverse functions, such as the
    Lambert W function which is the inverse of `w e^w`, given the first
    term of the solution's asymptotic expansion as the initial value. In basic
    cases, this gives identical results to mpmath's built-in ``lambertw``
    function::

        >>> def lambert(x):
        ...     return findroot(lambda w: w*exp(w) - x, log(1+x))
        ...
        >>> mp.dps = 15
        >>> lambert(1); lambertw(1)
        0.567143290409784
        0.567143290409784
        >>> lambert(1000); lambert(1000)
        5.2496028524016
        5.2496028524016

    Multidimensional functions are also supported::

        >>> f = [lambda x1, x2: x1**2 + x2,
        ...      lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
        >>> findroot(f, (0, 0))
        [-0.618033988749895]
        [-0.381966011250105]
        >>> findroot(f, (10, 10))
        [ 1.61803398874989]
        [-2.61803398874989]

    You can verify this by solving the system manually.

    Please note that the following (more general) syntax also works::

        >>> def f(x1, x2):
        ...     return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
        ...
        >>> findroot(f, (0, 0))
        [-0.618033988749895]
        [-0.381966011250105]


    **Multiple roots**

    For multiple roots all methods of the Newtonian family (including secant)
    converge slowly. Consider this example::

        >>> f = lambda x: (x - 1)**99
        >>> findroot(f, 0.9, verify=False)
        0.918073542444929

    Even for a very close starting point the secant method converges very
    slowly. Use ``verbose=True`` to illustrate this.

    It is possible to modify Newton's method to make it converge regardless of
    the root's multiplicity::

        >>> findroot(f, -10, solver='mnewton')
        1.0

    This variant uses the first and second derivative of the function, which is
    not very efficient.

    Alternatively you can use an experimental Newtonian solver that keeps track
    of the speed of convergence and accelerates it using Steffensen's method if
    necessary::

        >>> findroot(f, -10, solver='anewton', verbose=True)
        x:     -9.88888888888888888889
        error: 0.111111111111111111111
        converging slowly
        x:     -9.77890011223344556678
        error: 0.10998877665544332211
        converging slowly
        x:     -9.67002233332199662166
        error: 0.108877778911448945119
        converging slowly
        accelerating convergence
        x:     -9.5622443299551077669
        error: 0.107778003366888854764
        converging slowly
        x:     0.99999999999999999214
        error: 10.562244329955107759
        x:     1.0
        error: 7.8598304758094664213e-18
        ZeroDivisionError: canceled with x = 1.0
        1.0

    **Complex roots**

    For complex roots it's recommended to use Muller's method as it converges
    even for real starting points very fast::

        >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
        (0.727136084491197 + 0.934099289460529j)


    **Intersection methods**

    When you need to find a root in a known interval, it's highly recommended to
    use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
    Usually they converge faster and more reliable. They have however problems
    with multiple roots and usually need a sign change to find a root::

        >>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
        0.0

    Be careful with symmetric functions::

        >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
        Traceback (most recent call last):
          ...
        ZeroDivisionError

    It fails even for better starting points, because there is no sign change::

        >>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
        Traceback (most recent call last):
          ...
        ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19)
        Try another starting point or tweak arguments.

    """
    # work at slightly raised precision; restored in the finally clause
    prec = ctx.prec
    try:
        ctx.prec += 20

        # initialize arguments
        if tol is None:
            tol = ctx.eps * 2**10

        kwargs['verbose'] = kwargs.get('verbose', verbose)

        # 'd1f' is accepted as an alias for 'df'
        if 'd1f' in kwargs:
            kwargs['df'] = kwargs['d1f']

        kwargs['tol'] = tol
        # normalize x0 to a list of context numbers
        if isinstance(x0, (list, tuple)):
            x0 = [ctx.convert(x) for x in x0]
        else:
            x0 = [ctx.convert(x0)]

        # resolve string aliases to solver classes
        if isinstance(solver, str):
            try:
                solver = str2solver[solver]
            except KeyError:
                raise ValueError('could not recognize solver')

        # accept list of functions
        if isinstance(f, (list, tuple)):
            f2 = copy(f)
            def tmp(*args):
                return [fn(*args) for fn in f2]
            f = tmp

        # detect multidimensional functions
        try:
            fx = f(*x0)
            multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
        except TypeError:
            # f rejected the unpacked call, so treat it as one-dimensional
            fx = f(x0[0])
            multidimensional = False
        if 'multidimensional' in kwargs:
            multidimensional = kwargs['multidimensional']
        if multidimensional:
            # only one multidimensional solver available at the moment
            solver = MDNewton
            if not 'norm' in kwargs:
                norm = lambda x: ctx.norm(x, 'inf')
                kwargs['norm'] = norm
            else:
                norm = kwargs['norm']
        else:
            norm = abs

        # happily return starting point if it's a root
        if norm(fx) == 0:
            if multidimensional:
                return ctx.matrix(x0)
            else:
                return x0[0]

        # use solver
        iterations = solver(ctx, f, x0, **kwargs)
        if 'maxsteps' in kwargs:
            maxsteps = kwargs['maxsteps']
        else:
            maxsteps = iterations.maxsteps
        i = 0
        # iterate until the error is small relative to the solution size,
        # or the step budget is exhausted
        for x, error in iterations:
            if verbose:
                print('x:    ', x)
                print('error:', error)
            i += 1
            if error < tol * max(1, norm(x)) or i >= maxsteps:
                break
        else:
            # the solver's generator terminated on its own
            if not i:
                raise ValueError('Could not find root using the given solver.\n'
                                 'Try another starting point or tweak arguments.')
        if not isinstance(x, (list, tuple, ctx.matrix)):
            xl = [x]
        else:
            xl = x
        if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
            raise ValueError('Could not find root within given tolerance. '
                             '(%s > %s)\n'
                             'Try another starting point or tweak arguments.'
                             % (norm(f(*xl))**2, tol))
        return x
    finally:
        ctx.prec = prec
992
+
993
+
994
def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs):
    """
    Return the multiplicity of a given root of f.

    Internally, numerical derivatives are used. This might be inefficient for
    higher order derivatives. Due to this, ``multiplicity`` cancels after
    evaluating 10 derivatives by default. You can specify the n-th derivative
    using the dnf keyword.

    >>> from mpmath import *
    >>> multiplicity(lambda x: sin(x) - 1, pi/2)
    2

    """
    if tol is None:
        tol = ctx.eps ** 0.8
    # the 0-th "derivative" is f itself
    kwargs['d0f'] = f
    for order in range(maxsteps):
        key = 'd%if' % order
        if key in kwargs:
            derivative = kwargs[key]
        else:
            # bind the current order as a default to avoid late binding
            derivative = lambda x, n=order: ctx.diff(f, x, n)
        # the multiplicity is the order of the first non-vanishing derivative
        if not abs(derivative(root)) < tol:
            break
    return order
1020
+
1021
def steffensen(f):
    """
    linear convergent function -> quadratic convergent function

    Steffensen's method for quadratic convergence of a linear converging
    sequence.
    Do not use it for higher rates of convergence.
    It may even work for divergent sequences.

    Definition:
    F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)

    Example
    .......

    You can use Steffensen's method to accelerate a fixpoint iteration of linear
    (or less) convergence.

    x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For
    phi(x) = x**2 there are two fixpoints: 0 and 1.

    Let's try Steffensen's method:

    >>> f = lambda x: x**2
    >>> from mpmath.calculus.optimization import steffensen
    >>> F = steffensen(f)
    >>> for x in [0.5, 0.9, 2.0]:
    ...     fx = Fx = x
    ...     for i in xrange(9):
    ...         try:
    ...             fx = f(fx)
    ...         except OverflowError:
    ...             pass
    ...         try:
    ...             Fx = F(Fx)
    ...         except ZeroDivisionError:
    ...             pass
    ...         print('%20g  %20g' % (fx, Fx))
    0.25                  -0.5
    0.0625                 0.1
    0.00390625         -0.0011236
    1.52588e-05        1.41691e-09
    2.32831e-10       -2.84465e-27
    5.42101e-20        2.30189e-80
    2.93874e-39       -1.2197e-239
    8.63617e-78                  0
    7.45834e-155                 0
    0.81               1.02676
    0.6561             1.00134
    0.430467                 1
    0.185302                 1
    0.0343368                1
    0.00117902               1
    1.39008e-06              1
    1.93233e-12              1
    3.73392e-24              1
    4                      1.6
    16                  1.2962
    256                1.10194
    65536              1.01659
    4.29497e+09        1.00053
    1.84467e+19              1
    3.40282e+38              1
    1.15792e+77              1
    1.34078e+154             1

    Unmodified, the iteration converges only towards 0. Modified it converges
    not only much faster, it converges even to the repelling fixpoint 1.
    """
    def F(x):
        # one Aitken delta-squared step built from f(x) and f(f(x))
        gx = f(x)
        ggx = f(gx)
        return (x*ggx - gx*gx) / (ggx - 2*gx + x)
    return F
1095
+
1096
# Install the solvers as methods on the context mixin so they are
# available as ctx.jacobian / ctx.findroot / ctx.multiplicity.
OptimizationMethods.jacobian = jacobian
OptimizationMethods.findroot = findroot
OptimizationMethods.multiplicity = multiplicity
1099
+
1100
# Run the module doctests when this file is executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
lib/python3.11/site-packages/mpmath/calculus/polynomials.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ #----------------------------------------------------------------------------#
5
+ # Polynomials #
6
+ #----------------------------------------------------------------------------#
7
+
8
+ # XXX: extra precision
9
@defun
def polyval(ctx, coeffs, x, derivative=False):
    r"""
    Evaluate a polynomial at the point `x` using Horner's scheme.

    The coefficients are given in order of decreasing degree,
    `[c_n, \ldots, c_2, c_1, c_0]`, representing

    .. math ::

        P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.

    With *derivative=True*, :func:`~mpmath.polyval` evaluates the
    derivative alongside the polynomial and returns the tuple
    `(P(x), P'(x))`.

    >>> from mpmath import *
    >>> mp.pretty = True
    >>> polyval([3, 0, 2], 0.5)
    2.75
    >>> polyval([3, 0, 2], 0.5, derivative=True)
    (2.75, 3.0)

    Both the coefficients and the evaluation point may be any
    combination of real or complex numbers.
    """
    if not coeffs:
        # An empty coefficient list is treated as the zero polynomial
        return ctx.zero
    value = ctx.convert(coeffs[0])
    deriv = ctx.zero
    for c in coeffs[1:]:
        if derivative:
            # Horner step for P'(x), reusing the running value of P(x)
            deriv = value + x*deriv
        value = c + x*value
    if derivative:
        return value, deriv
    return value
45
+
46
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
    error=False, roots_init=None):
    """
    Computes all roots (real or complex) of a given polynomial.

    The polynomial is given as a list of coefficients, in the format
    used by :func:`~mpmath.polyval`; the leading coefficient must be
    nonzero. The roots are returned as a sorted list: real roots come
    first, followed by complex conjugate roots as adjacent elements.

    With *error=True*, a tuple *(roots, err)* is returned, where *err*
    estimates the maximum error among the computed roots. Optional
    starting approximations may be supplied via *roots_init*.

    **Examples**

    The three real roots of `x^3 - x^2 - 14x + 24`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(polyroots([1,-1,-14,24]), 4)
        [-4.0, 2.0, 3.0]

    The complex conjugate roots of `4x^2 + 3x + 2`, with an error
    estimate::

        >>> roots, err = polyroots([4,3,2], error=True)
        >>> for r in roots:
        ...     print(r)
        ...
        (-0.375 + 0.59947894041409j)
        (-0.375 - 0.59947894041409j)
        >>> err
        2.22044604925031e-16

    **Precision and conditioning**

    The roots are computed to the current working precision. If that
    accuracy is not reached within ``maxsteps`` iterations, a
    ``NoConvergence`` exception is raised; increasing ``extraprec``
    (extra internal working precision) and/or ``maxsteps`` usually
    fixes this. A convergence study with respect to ``extraprec`` is
    recommended, since too little extra precision can even yield
    convergence to a wrong answer.

    **Algorithm**

    :func:`~mpmath.polyroots` implements the Durand-Kerner method [1],
    which locates all roots simultaneously using complex arithmetic,
    behaving roughly like simultaneous Newton iteration (quadratic
    convergence to simple roots). Any root whose imaginary (or real)
    part falls below the estimated numerical error is chopped to a
    real (or purely imaginary) number.

    **References**

    1. http://en.wikipedia.org/wiki/Durand-Kerner_method

    """
    if len(coeffs) <= 1:
        if not coeffs or not coeffs[0]:
            raise ValueError("Input to polyroots must not be the zero polynomial")
        # A nonzero constant polynomial has no roots
        return []

    orig = ctx.prec
    tol = +ctx.eps
    with ctx.extraprec(extraprec):
        deg = len(coeffs) - 1
        # Normalize to a monic polynomial
        lead = ctx.convert(coeffs[0])
        if lead == 1:
            coeffs = [ctx.convert(c) for c in coeffs]
        else:
            coeffs = [c/lead for c in coeffs]
        f = lambda x: ctx.polyval(coeffs, x)
        # Starting guesses: powers of 0.4+0.9j spread over the complex plane
        if roots_init is None:
            roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
        else:
            roots = [None]*deg
            given = min(deg, len(roots_init))
            roots[:given] = list(roots_init[:given])
            roots[given:] = [ctx.mpc((0.4+0.9j)**n) for n
                             in xrange(given, deg)]
        err = [ctx.one for n in xrange(deg)]
        # Durand-Kerner iteration until every correction is below tolerance
        for step in xrange(maxsteps):
            if abs(max(err)) < tol:
                break
            for i in xrange(deg):
                p = roots[i]
                x = f(p)
                for j in range(deg):
                    if i != j:
                        try:
                            x /= (p-roots[j])
                        except ZeroDivisionError:
                            continue
                roots[i] = p - x
                err[i] = abs(x)
        if abs(max(err)) >= tol:
            raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
                % maxsteps)
        # Chop tiny real or imaginary parts caused by rounding
        if cleanup:
            for i in xrange(deg):
                if abs(roots[i]) < tol:
                    roots[i] = ctx.zero
                elif abs(ctx._im(roots[i])) < tol:
                    roots[i] = roots[i].real
                elif abs(ctx._re(roots[i])) < tol:
                    roots[i] = roots[i].imag * 1j
        # Real roots first (by value); complex roots sorted by real part
        # so that conjugate pairs end up adjacent
        roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
    if error:
        err = max(err)
        err = max(err, ctx.ldexp(1, -orig+1))
        return [+r for r in roots], +err
    else:
        return [+r for r in roots]
lib/python3.11/site-packages/mpmath/calculus/quadrature.py ADDED
@@ -0,0 +1,1115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ from ..libmp.backend import xrange
4
+
5
class QuadratureRule(object):
    """
    Common infrastructure for quadrature rules.

    A concrete rule subclasses :class:`QuadratureRule` and implements
    the appropriate methods (usually just :func:`~mpmath.calc_nodes`);
    the base class provides node caching, interval transformation,
    degree guessing and error estimation. The subclass can then be used
    by :func:`~mpmath.quad` by passing it as the *method* argument.

    :class:`QuadratureRule` instances are supposed to be singletons.
    :class:`QuadratureRule` therefore implements instance caching
    in :func:`~mpmath.__new__`.
    """

    def __init__(self, ctx):
        self.ctx = ctx
        # Nodes on the standard interval [-1, 1], keyed by (degree, prec)
        self.standard_cache = {}
        # Nodes mapped to a concrete interval, keyed by (a, b, degree, prec)
        self.transformed_cache = {}
        # Tracks intervals seen once; transformed nodes are only cached
        # from the second request onwards
        self.interval_count = {}

    def clear(self):
        """
        Delete all cached node data.
        """
        self.standard_cache = {}
        self.transformed_cache = {}
        self.interval_count = {}

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Compute nodes for the standard interval `[-1, 1]`. Subclasses
        should probably implement only this method, and let callers use
        :func:`~mpmath.get_nodes` to retrieve (cached) nodes.
        """
        raise NotImplementedError

    def get_nodes(self, a, b, degree, prec, verbose=False):
        """
        Return nodes for the given interval, degree and precision. The
        nodes are served from a cache when available; otherwise they are
        computed via :func:`~mpmath.calc_nodes` and then cached.

        Subclasses should probably not override this method, but just
        implement :func:`~mpmath.calc_nodes` for the actual computation.
        """
        key = (a, b, degree, prec)
        if key in self.transformed_cache:
            return self.transformed_cache[key]
        saved_prec = self.ctx.prec
        try:
            self.ctx.prec = prec+20
            # Nodes on the standard interval
            if (degree, prec) in self.standard_cache:
                nodes = self.standard_cache[degree, prec]
            else:
                nodes = self.calc_nodes(degree, prec, verbose)
                self.standard_cache[degree, prec] = nodes
            # Map onto the requested interval; cache only for intervals
            # that have been requested before
            nodes = self.transform_nodes(nodes, a, b, verbose)
            if key in self.interval_count:
                self.transformed_cache[key] = nodes
            else:
                self.interval_count[key] = True
        finally:
            self.ctx.prec = saved_prec
        return nodes

    def transform_nodes(self, nodes, a, b, verbose=False):
        r"""
        Rescale standardized nodes (for `[-1, 1]`) to a general
        interval `[a, b]`. A finite interval uses a simple linear
        change of variables; otherwise one of the following is used:

        .. math ::

            \lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)

            \lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}

            \lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}

        """
        ctx = self.ctx
        a = ctx.convert(a)
        b = ctx.convert(b)
        one = ctx.one
        if (a, b) == (-one, one):
            return nodes
        half = ctx.mpf(0.5)
        mapped = []
        if ctx.isinf(a) or ctx.isinf(b):
            if (a, b) == (ctx.ninf, ctx.inf):
                neg_half = -half
                for x, w in nodes:
                    sq = x*x
                    rem = one - sq
                    scale = rem**neg_half
                    mapped.append((x*scale, w*(scale/rem)))
            elif a == ctx.ninf:
                shift = b + 1
                for x, w in nodes:
                    u = 2/(x+one)
                    mapped.append((shift - u, w*(half*u**2)))
            elif b == ctx.inf:
                shift = a - 1
                for x, w in nodes:
                    u = 2/(x+one)
                    mapped.append((shift + u, w*(half*u**2)))
            elif a == ctx.inf or b == ctx.ninf:
                # Reversed orientation: flip the interval, negate weights
                return [(x, -w) for (x, w) in self.transform_nodes(nodes, b, a, verbose)]
            else:
                raise NotImplementedError
        else:
            # Simple linear change of variables
            C = (b-a)/2
            D = (b+a)/2
            for x, w in nodes:
                mapped.append((D+C*x, C*w))
        return mapped

    def guess_degree(self, prec):
        """
        Given a desired precision `p` in bits, estimate the degree `m`
        of the quadrature required to accomplish full accuracy for
        typical integrals. By default, :func:`~mpmath.quad` performs up
        to `m` iterations. The value should be a slight overestimate,
        so that "slightly bad" integrals still converge with a few
        extra iterations, yet not so big that :func:`~mpmath.quad`
        takes unreasonably long on an "unsolvable" integral.

        The default formula is tuned for both :class:`TanhSinh` and
        :class:`GaussLegendre`; roughly:

        +---------+---------+
        |   `p`   |   `m`   |
        +=========+=========+
        |    50   |     6   |
        +---------+---------+
        |   100   |     7   |
        +---------+---------+
        |   500   |    10   |
        +---------+---------+
        |  3000   |    12   |
        +---------+---------+

        The formula is based purely on a limited amount of
        experimentation and will sometimes be wrong.
        """
        # Expected degree
        # XXX: use mag
        degree = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
        # Reasonable "worst case" margin
        degree += 2
        return degree

    def estimate_error(self, results, prec, epsilon):
        r"""
        Given results from integrations `[I_1, I_2, \ldots, I_k]` done
        with a quadrature rule of degree `1, 2, \ldots, k`, estimate
        the error of `I_k`.

        For `k = 2`, `|I_{\infty}-I_2|` is estimated as `|I_2-I_1|`.

        For `k > 2`, `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|` is
        extrapolated from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under
        the assumption that each degree increment roughly doubles the
        accuracy (true for both :class:`TanhSinh` and
        :class:`GaussLegendre`). The extrapolation formula is due to
        Borwein, Bailey & Girgensohn; although not very conservative,
        it is very robust in practice.
        """
        if len(results) == 2:
            return abs(results[0]-results[1])
        try:
            if results[-1] == results[-2] == results[-3]:
                return self.ctx.zero
            d1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
            d2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
        except ValueError:
            return epsilon
        d3 = -prec
        d4 = min(0, max(d1**2/d2, 2*d1, d3))
        return self.ctx.mpf(10) ** int(d4)

    def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
        """
        Main integration function. Computes the 1D integral over
        the interval specified by *points*. Each subinterval is
        integrated with quadratures of degree 1 up to *max_degree*,
        until :func:`~mpmath.estimate_error` signals convergence.

        Each subintegration is transformed to the standard interval
        and then handed to :func:`~mpmath.sum_next`.
        """
        ctx = self.ctx
        total = total_err = ctx.zero
        for i in xrange(len(points)-1):
            a, b = points[i], points[i+1]
            if a == b:
                continue
            # XXX: we could use a single variable transformation, but
            # splitting at 0 gives better accuracy in practice.
            if (a, b) == (ctx.ninf, ctx.inf):
                g = f
                f = lambda x: g(-x) + g(x)
                a, b = (ctx.zero, ctx.inf)
            results = []
            err = ctx.zero
            for degree in xrange(1, max_degree+1):
                nodes = self.get_nodes(a, b, degree, prec, verbose)
                if verbose:
                    print("Integrating from %s to %s (degree %s of %s)" % \
                        (ctx.nstr(a), ctx.nstr(b), degree, max_degree))
                result = self.sum_next(f, nodes, degree, prec, results, verbose)
                results.append(result)
                if degree > 1:
                    err = self.estimate_error(results, prec, epsilon)
                    if verbose:
                        print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
                    if err <= epsilon:
                        break
            total += results[-1]
            total_err += err
        if total_err > epsilon:
            if verbose:
                print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
        return total, total_err

    def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
        r"""
        Evaluate the step sum `\sum w_k f(x_k)`, where the *nodes* list
        contains the `(w_k, x_k)` pairs.

        :func:`~mpmath.summation` supplies the list *previous* of
        values computed at lower degrees, in case the rule is able to
        reuse them.
        """
        return self.ctx.fdot((w, f(x)) for (x, w) in nodes)
258
+
259
+
260
class TanhSinh(QuadratureRule):
    r"""
    "Tanh-sinh" or "doubly exponential" quadrature.

    The rule is based on the Euler-Maclaurin integral formula: a change
    of variables involving nested exponentials / hyperbolic functions
    (hence the name) makes the derivatives at the endpoints vanish
    rapidly. Since the Euler-Maclaurin error term depends on those
    endpoint derivatives, a simple step sum becomes extremely accurate;
    in practice, doubling the number of evaluation points roughly
    doubles the number of accurate digits.

    Compared to Gauss-Legendre:
      * Initial computation of nodes is usually faster
      * Handles endpoint singularities better
      * Handles infinite integration intervals better
      * Is slower for smooth integrands once nodes have been computed

    The implementation follows the description in Borwein, Bailey &
    Girgensohn, "Experimentation in Mathematics - Computational Paths
    to Discovery", A K Peters, 2003, pages 312-313, with two
    improvements:

    * A more efficient scheme is used to compute nodes (exploiting
      recurrence for the exponential function)
    * The nodes are computed successively instead of all at once

    **References**

    * [Bailey]_
    * http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf

    """

    def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
        """
        Step sum for tanh-sinh quadrature of degree `m`. Half of the
        abscissas at degree `m` are precisely the abscissas from degree
        `m-1`, so rescaling and reusing the previous result gives a
        2x speedup.
        """
        h = self.ctx.mpf(2)**(-degree)
        if previous:
            # Rescale the previous level's sum to the new step length
            acc = previous[-1]/(h*2)
        else:
            acc = self.ctx.zero
        acc += self.ctx.fdot((w, f(x)) for (x, w) in nodes)
        return h*acc

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        The abscissas and weights for tanh-sinh quadrature of degree
        `m` are given by

        .. math::

            x_k = \tanh(\pi/2 \sinh(t_k))

            w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2

        where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
        list of nodes is formally infinite, but the weights die off so
        rapidly that only a few are needed.
        """
        ctx = self.ctx
        nodes = []

        extra = 20
        ctx.prec += extra
        tol = ctx.ldexp(1, -prec-10)
        quarter_pi = ctx.pi/4

        # We work with fixed steps h = 1/2^n, with the first point
        # offset so the sum from the previous degree can be reused.
        # Degree 1 includes the "degree 0" steps, including the point
        # x = 0. (It doesn't work well otherwise; not sure why.)
        t0 = ctx.ldexp(1, -degree)
        if degree == 1:
            nodes.append((ctx.zero, ctx.pi/2))
            h = t0
        else:
            h = t0*2

        # Since h is fixed, successive exponentials follow by simply
        # multiplying by exp(h)
        e_t0 = ctx.exp(t0)
        grow = quarter_pi * e_t0
        decay = quarter_pi / e_t0
        step_up = ctx.exp(h)
        step_down = 1/step_up

        for k in xrange(0, 20*2**degree+1):
            # Reference implementation:
            #   t = t0 + k*h
            #   x = tanh(pi/2 * sinh(t))
            #   w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
            # Fast version below; note that ep = exp(pi/2 * sinh(t))
            ep = ctx.exp(grow-decay)
            em = 1/ep
            ch = (ep+em)/2
            sh = (ep-em)/2
            x = sh / ch
            w = (grow+decay) / ch**2
            diff = abs(x-1)
            if diff <= tol:
                break

            nodes.append((x, w))
            nodes.append((-x, w))

            grow *= step_up
            decay *= step_down

            if verbose and k % 300 == 150:
                # Note: the number displayed is rather arbitrary. Should
                # figure out how to print something that looks more like a
                # percentage
                print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))

        ctx.prec -= extra
        return nodes
387
+
388
+
389
class GaussLegendre(QuadratureRule):
    r"""
    Gauss-Legendre quadrature, which is exceptionally efficient for
    polynomials and polynomial-like (i.e. very smooth) integrands.

    The abscissas and weights are roots and values of Legendre
    polynomials, the orthogonal polynomials on `[-1, 1]` with respect
    to the unit weight (see :func:`~mpmath.legendre`).

    In this implementation, "degree" `m` denotes a Gauss-Legendre rule
    of degree `3 \cdot 2^m` (following Borwein, Bailey & Girgensohn),
    giving quadratic rather than linear convergence as the degree is
    incremented.

    Compared to tanh-sinh quadrature:
      * Is faster for smooth integrands once nodes have been computed
      * Initial computation of nodes is usually slower
      * Handles endpoint singularities worse
      * Handles infinite integration intervals worse

    """

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Calculate the abscissas and weights for Gauss-Legendre
        quadrature of the given degree (actually `3 \cdot 2^m` points).
        """
        ctx = self.ctx
        # The epsilon must be set below the "real" epsilon so Newton's
        # method polishes each root to full accuracy
        epsilon = ctx.ldexp(1, -prec-8)
        # Fairly high precision might be required for accurate
        # evaluation of the roots
        orig = ctx.prec
        ctx.prec = int(prec*1.5)
        if degree == 1:
            # The classical 3-point rule, known in closed form
            x = ctx.sqrt(ctx.mpf(3)/5)
            w = ctx.mpf(5)/9
            nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
            ctx.prec = orig
            return nodes
        nodes = []
        n = 3*2**(degree-1)
        upto = n//2 + 1
        for j in xrange(1, upto):
            # Asymptotic approximation of the j-th root
            root = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
            # Polish with Newton's method
            while 1:
                p_k, p_km1 = 1, 0
                # Evaluate the Legendre polynomial via its defining
                # three-term recurrence relation
                for k in xrange(1, n+1):
                    p_km1, p_k = p_k, ((2*k-1)*root*p_k - (k-1)*p_km1)/k
                dp = n*(root*p_k - p_km1)/(root**2 - 1)
                delta = p_k/dp
                root = root - delta
                if abs(delta) < epsilon:
                    break
            weight = 2/((1-root**2)*dp**2)
            if verbose and j % 30 == 15:
                print("Computing nodes (%i of %i)" % (j, upto))
            nodes.append((root, weight))
            nodes.append((-root, weight))
        ctx.prec = orig
        return nodes
458
+
459
+ class QuadratureMethods(object):
460
+
461
+ def __init__(ctx, *args, **kwargs):
462
+ ctx._gauss_legendre = GaussLegendre(ctx)
463
+ ctx._tanh_sinh = TanhSinh(ctx)
464
+
465
+ def quad(ctx, f, *points, **kwargs):
466
+ r"""
467
+ Computes a single, double or triple integral over a given
468
+ 1D interval, 2D rectangle, or 3D cuboid. A basic example::
469
+
470
+ >>> from mpmath import *
471
+ >>> mp.dps = 15; mp.pretty = True
472
+ >>> quad(sin, [0, pi])
473
+ 2.0
474
+
475
+ A basic 2D integral::
476
+
477
+ >>> f = lambda x, y: cos(x+y/2)
478
+ >>> quad(f, [-pi/2, pi/2], [0, pi])
479
+ 4.0
480
+
481
+ **Interval format**
482
+
483
+ The integration range for each dimension may be specified
484
+ using a list or tuple. Arguments are interpreted as follows:
485
+
486
+ ``quad(f, [x1, x2])`` -- calculates
487
+ `\int_{x_1}^{x_2} f(x) \, dx`
488
+
489
+ ``quad(f, [x1, x2], [y1, y2])`` -- calculates
490
+ `\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
491
+
492
+ ``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
493
+ `\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
494
+ \, dz \, dy \, dx`
495
+
496
+ Endpoints may be finite or infinite. An interval descriptor
497
+ may also contain more than two points. In this
498
+ case, the integration is split into subintervals, between
499
+ each pair of consecutive points. This is useful for
500
+ dealing with mid-interval discontinuities, or integrating
501
+ over large intervals where the function is irregular or
502
+ oscillates.
503
+
504
+ **Options**
505
+
506
+ :func:`~mpmath.quad` recognizes the following keyword arguments:
507
+
508
+ *method*
509
+ Chooses integration algorithm (described below).
510
+ *error*
511
+ If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
512
+ integral and `e` is the estimated error.
513
+ *maxdegree*
514
+ Maximum degree of the quadrature rule to try before
515
+ quitting.
516
+ *verbose*
517
+ Print details about progress.
518
+
519
+ **Algorithms**
520
+
521
+ Mpmath presently implements two integration algorithms: tanh-sinh
522
+ quadrature and Gauss-Legendre quadrature. These can be selected
523
+ using *method='tanh-sinh'* or *method='gauss-legendre'* or by
524
+ passing the classes *method=TanhSinh*, *method=GaussLegendre*.
525
+ The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
526
+ as shortcuts.
527
+
528
+ Both algorithms have the property that doubling the number of
529
+ evaluation points roughly doubles the accuracy, so both are ideal
530
+ for high precision quadrature (hundreds or thousands of digits).
531
+
532
+ At high precision, computing the nodes and weights for the
533
+ integration can be expensive (more expensive than computing the
534
+ function values). To make repeated integrations fast, nodes
535
+ are automatically cached.
536
+
537
+ The advantages of the tanh-sinh algorithm are that it tends to
538
+ handle endpoint singularities well, and that the nodes are cheap
539
+ to compute on the first run. For these reasons, it is used by
540
+ :func:`~mpmath.quad` as the default algorithm.
541
+
542
+ Gauss-Legendre quadrature often requires fewer function
543
+ evaluations, and is therefore often faster for repeated use, but
544
+ the algorithm does not handle endpoint singularities as well and
545
+ the nodes are more expensive to compute. Gauss-Legendre quadrature
546
+ can be a better choice if the integrand is smooth and repeated
547
+ integrations are required (e.g. for multiple integrals).
548
+
549
+ See the documentation for :class:`TanhSinh` and
550
+ :class:`GaussLegendre` for additional details.
551
+
552
+ **Examples of 1D integrals**
553
+
554
+ Intervals may be infinite or half-infinite. The following two
555
+ examples evaluate the limits of the inverse tangent function
556
+ (`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
557
+ `\int_{\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
558
+
559
+ >>> mp.dps = 15
560
+ >>> quad(lambda x: 2/(x**2+1), [0, inf])
561
+ 3.14159265358979
562
+ >>> quad(lambda x: exp(-x**2), [-inf, inf])**2
563
+ 3.14159265358979
564
+
565
+ Integrals can typically be resolved to high precision.
566
+ The following computes 50 digits of `\pi` by integrating the
567
+ area of the half-circle defined by `x^2 + y^2 \le 1`,
568
+ `-1 \le x \le 1`, `y \ge 0`::
569
+
570
+ >>> mp.dps = 50
571
+ >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
572
+ 3.1415926535897932384626433832795028841971693993751
573
+
574
+ One can just as well compute 1000 digits (output truncated)::
575
+
576
+ >>> mp.dps = 1000
577
+ >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
578
+ 3.141592653589793238462643383279502884...216420199
579
+
580
+ Complex integrals are supported. The following computes
581
+ a residue at `z = 0` by integrating counterclockwise along the
582
+ diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
583
+
584
+ >>> mp.dps = 15
585
+ >>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
586
+ (0.0 + 6.28318530717959j)
587
+
588
+ **Examples of 2D and 3D integrals**
589
+
590
+ Here are several nice examples of analytically solvable
591
+ 2D integrals (taken from MathWorld [1]) that can be evaluated
592
+ to high precision fairly rapidly by :func:`~mpmath.quad`::
593
+
594
+ >>> mp.dps = 30
595
+ >>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
596
+ >>> quad(f, [0, 1], [0, 1])
597
+ 0.577215664901532860606512090082
598
+ >>> +euler
599
+ 0.577215664901532860606512090082
600
+
601
+ >>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
602
+ >>> quad(f, [-1, 1], [-1, 1])
603
+ 3.17343648530607134219175646705
604
+ >>> 4*log(2+sqrt(3))-2*pi/3
605
+ 3.17343648530607134219175646705
606
+
607
+ >>> f = lambda x, y: 1/(1-x**2 * y**2)
608
+ >>> quad(f, [0, 1], [0, 1])
609
+ 1.23370055013616982735431137498
610
+ >>> pi**2 / 8
611
+ 1.23370055013616982735431137498
612
+
613
+ >>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
614
+ 1.64493406684822643647241516665
615
+ >>> pi**2 / 6
616
+ 1.64493406684822643647241516665
617
+
618
+ Multiple integrals may be done over infinite ranges::
619
+
620
+ >>> mp.dps = 15
621
+ >>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
622
+ 0.367879441171442
623
+ >>> print(1/e)
624
+ 0.367879441171442
625
+
626
+ For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
627
+ For example, we can replicate the earlier example of calculating
628
+ `\pi` by integrating over the unit-circle, and actually use double
629
+ quadrature to actually measure the area circle::
630
+
631
+ >>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
632
+ >>> quad(f, [-1, 1])
633
+ 3.14159265358979
634
+
635
+ Here is a simple triple integral::
636
+
637
+ >>> mp.dps = 15
638
+ >>> f = lambda x,y,z: x*y/(1+z)
639
+ >>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
640
+ 0.101366277027041
641
+ >>> (log(3)-log(2))/4
642
+ 0.101366277027041
643
+
644
+ **Singularities**
645
+
646
+ Both tanh-sinh and Gauss-Legendre quadrature are designed to
647
+ integrate smooth (infinitely differentiable) functions. Neither
648
+ algorithm copes well with mid-interval singularities (such as
649
+ mid-interval discontinuities in `f(x)` or `f'(x)`).
650
+ The best solution is to split the integral into parts::
651
+
652
+ >>> mp.dps = 15
653
+ >>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
654
+ 3.99900894176779
655
+ >>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
656
+ 4.0
657
+
658
+ The tanh-sinh rule often works well for integrands having a
659
+ singularity at one or both endpoints::
660
+
661
+ >>> mp.dps = 15
662
+ >>> quad(log, [0, 1], method='tanh-sinh') # Good
663
+ -1.0
664
+ >>> quad(log, [0, 1], method='gauss-legendre') # Bad
665
+ -0.999932197413801
666
+
667
+ However, the result may still be inaccurate for some functions::
668
+
669
+ >>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
670
+ 1.99999999946942
671
+
672
+ This problem is not due to the quadrature rule per se, but to
673
+ numerical amplification of errors in the nodes. The problem can be
674
+ circumvented by temporarily increasing the precision::
675
+
676
+ >>> mp.dps = 30
677
+ >>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
678
+ >>> mp.dps = 15
679
+ >>> +a
680
+ 2.0
681
+
682
+ **Highly variable functions**
683
+
684
+ For functions that are smooth (in the sense of being infinitely
685
+ differentiable) but contain sharp mid-interval peaks or many
686
+ "bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
687
+ example, with default settings, :func:`~mpmath.quad` is able to integrate
688
+ `\sin(x)` accurately over an interval of length 100 but not over
689
+ length 1000::
690
+
691
+ >>> quad(sin, [0, 100]); 1-cos(100) # Good
692
+ 0.137681127712316
693
+ 0.137681127712316
694
+ >>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
695
+ -37.8587612408485
696
+ 0.437620923709297
697
+
698
+ One solution is to break the integration into 10 intervals of
699
+ length 100::
700
+
701
+ >>> quad(sin, linspace(0, 1000, 10)) # Good
702
+ 0.437620923709297
703
+
704
+ Another is to increase the degree of the quadrature::
705
+
706
+ >>> quad(sin, [0, 1000], maxdegree=10) # Also good
707
+ 0.437620923709297
708
+
709
+ Whether splitting the interval or increasing the degree is
710
+ more efficient differs from case to case. Another example is the
711
+ function `1/(1+x^2)`, which has a sharp peak centered around
712
+ `x = 0`::
713
+
714
+ >>> f = lambda x: 1/(1+x**2)
715
+ >>> quad(f, [-100, 100]) # Bad
716
+ 3.64804647105268
717
+ >>> quad(f, [-100, 100], maxdegree=10) # Good
718
+ 3.12159332021646
719
+ >>> quad(f, [-100, 0, 100]) # Also good
720
+ 3.12159332021646
721
+
722
+ **References**
723
+
724
+ 1. http://mathworld.wolfram.com/DoubleIntegral.html
725
+
726
+ """
727
+ rule = kwargs.get('method', 'tanh-sinh')
728
+ if type(rule) is str:
729
+ if rule == 'tanh-sinh':
730
+ rule = ctx._tanh_sinh
731
+ elif rule == 'gauss-legendre':
732
+ rule = ctx._gauss_legendre
733
+ else:
734
+ raise ValueError("unknown quadrature rule: %s" % rule)
735
+ else:
736
+ rule = rule(ctx)
737
+ verbose = kwargs.get('verbose')
738
+ dim = len(points)
739
+ orig = prec = ctx.prec
740
+ epsilon = ctx.eps/8
741
+ m = kwargs.get('maxdegree') or rule.guess_degree(prec)
742
+ points = [ctx._as_points(p) for p in points]
743
+ try:
744
+ ctx.prec += 20
745
+ if dim == 1:
746
+ v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
747
+ elif dim == 2:
748
+ v, err = rule.summation(lambda x: \
749
+ rule.summation(lambda y: f(x,y), \
750
+ points[1], prec, epsilon, m)[0],
751
+ points[0], prec, epsilon, m, verbose)
752
+ elif dim == 3:
753
+ v, err = rule.summation(lambda x: \
754
+ rule.summation(lambda y: \
755
+ rule.summation(lambda z: f(x,y,z), \
756
+ points[2], prec, epsilon, m)[0],
757
+ points[1], prec, epsilon, m)[0],
758
+ points[0], prec, epsilon, m, verbose)
759
+ else:
760
+ raise NotImplementedError("quadrature must have dim 1, 2 or 3")
761
+ finally:
762
+ ctx.prec = orig
763
+ if kwargs.get("error"):
764
+ return +v, err
765
+ return +v
766
+
767
+ def quadts(ctx, *args, **kwargs):
768
+ """
769
+ Performs tanh-sinh quadrature. The call
770
+
771
+ quadts(func, *points, ...)
772
+
773
+ is simply a shortcut for:
774
+
775
+ quad(func, *points, ..., method=TanhSinh)
776
+
777
+ For example, a single integral and a double integral:
778
+
779
+ quadts(lambda x: exp(cos(x)), [0, 1])
780
+ quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
781
+
782
+ See the documentation for quad for information about how points
783
+ arguments and keyword arguments are parsed.
784
+
785
+ See documentation for TanhSinh for algorithmic information about
786
+ tanh-sinh quadrature.
787
+ """
788
+ kwargs['method'] = 'tanh-sinh'
789
+ return ctx.quad(*args, **kwargs)
790
+
791
+ def quadgl(ctx, *args, **kwargs):
792
+ """
793
+ Performs Gauss-Legendre quadrature. The call
794
+
795
+ quadgl(func, *points, ...)
796
+
797
+ is simply a shortcut for:
798
+
799
+ quad(func, *points, ..., method=GaussLegendre)
800
+
801
+ For example, a single integral and a double integral:
802
+
803
+ quadgl(lambda x: exp(cos(x)), [0, 1])
804
+ quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
805
+
806
+ See the documentation for quad for information about how points
807
+ arguments and keyword arguments are parsed.
808
+
809
+ See documentation for TanhSinh for algorithmic information about
810
+ tanh-sinh quadrature.
811
+ """
812
+ kwargs['method'] = 'gauss-legendre'
813
+ return ctx.quad(*args, **kwargs)
814
+
815
+ def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
816
+ r"""
817
+ Calculates
818
+
819
+ .. math ::
820
+
821
+ I = \int_a^b f(x) dx
822
+
823
+ where at least one of `a` and `b` is infinite and where
824
+ `f(x) = g(x) \cos(\omega x + \phi)` for some slowly
825
+ decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
826
+ can also handle oscillatory integrals where the oscillation
827
+ rate is different from a pure sine or cosine wave.
828
+
829
+ In the standard case when `|a| < \infty, b = \infty`,
830
+ :func:`~mpmath.quadosc` works by evaluating the infinite series
831
+
832
+ .. math ::
833
+
834
+ I = \int_a^{x_1} f(x) dx +
835
+ \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
836
+
837
+ where `x_k` are consecutive zeros (alternatively
838
+ some other periodic reference point) of `f(x)`.
839
+ Accordingly, :func:`~mpmath.quadosc` requires information about the
840
+ zeros of `f(x)`. For a periodic function, you can specify
841
+ the zeros by either providing the angular frequency `\omega`
842
+ (*omega*) or the *period* `2 \pi/\omega`. In general, you can
843
+ specify the `n`-th zero by providing the *zeros* arguments.
844
+ Below is an example of each::
845
+
846
+ >>> from mpmath import *
847
+ >>> mp.dps = 15; mp.pretty = True
848
+ >>> f = lambda x: sin(3*x)/(x**2+1)
849
+ >>> quadosc(f, [0,inf], omega=3)
850
+ 0.37833007080198
851
+ >>> quadosc(f, [0,inf], period=2*pi/3)
852
+ 0.37833007080198
853
+ >>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
854
+ 0.37833007080198
855
+ >>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
856
+ 0.37833007080198
857
+
858
+ Note that *zeros* was specified to multiply `n` by the
859
+ *half-period*, not the full period. In theory, it does not matter
860
+ whether each partial integral is done over a half period or a full
861
+ period. However, if done over half-periods, the infinite series
862
+ passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
863
+ typically makes the extrapolation much more efficient.
864
+
865
+ Here is an example of an integration over the entire real line,
866
+ and a half-infinite integration starting at `-\infty`::
867
+
868
+ >>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
869
+ 1.15572734979092
870
+ >>> pi/e
871
+ 1.15572734979092
872
+ >>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
873
+ -0.0844109505595739
874
+ >>> cos(1)+si(1)-pi/2
875
+ -0.0844109505595738
876
+
877
+ Of course, the integrand may contain a complex exponential just as
878
+ well as a real sine or cosine::
879
+
880
+ >>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
881
+ (0.156410688228254 + 0.0j)
882
+ >>> pi/e**3
883
+ 0.156410688228254
884
+ >>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
885
+ (0.00317486988463794 - 0.0447701735209082j)
886
+ >>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
887
+ (0.00317486988463794 - 0.0447701735209082j)
888
+
889
+ **Non-periodic functions**
890
+
891
+ If `f(x) = g(x) h(x)` for some function `h(x)` that is not
892
+ strictly periodic, *omega* or *period* might not work, and it might
893
+ be necessary to use *zeros*.
894
+
895
+ A notable exception can be made for Bessel functions which, though not
896
+ periodic, are "asymptotically periodic" in a sufficiently strong sense
897
+ that the sum extrapolation will work out::
898
+
899
+ >>> quadosc(j0, [0, inf], period=2*pi)
900
+ 1.0
901
+ >>> quadosc(j1, [0, inf], period=2*pi)
902
+ 1.0
903
+
904
+ More properly, one should provide the exact Bessel function zeros::
905
+
906
+ >>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
907
+ >>> quadosc(j0, [0, inf], zeros=j0zero)
908
+ 1.0
909
+
910
+ For an example where *zeros* becomes necessary, consider the
911
+ complete Fresnel integrals
912
+
913
+ .. math ::
914
+
915
+ \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
916
+ = \sqrt{\frac{\pi}{8}}.
917
+
918
+ Although the integrands do not decrease in magnitude as
919
+ `x \to \infty`, the integrals are convergent since the oscillation
920
+ rate increases (causing consecutive periods to asymptotically
921
+ cancel out). These integrals are virtually impossible to calculate
922
+ to any kind of accuracy using standard quadrature rules. However,
923
+ if one provides the correct asymptotic distribution of zeros
924
+ (`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
925
+
926
+ >>> mp.dps = 30
927
+ >>> f = lambda x: cos(x**2)
928
+ >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
929
+ 0.626657068657750125603941321203
930
+ >>> f = lambda x: sin(x**2)
931
+ >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
932
+ 0.626657068657750125603941321203
933
+ >>> sqrt(pi/8)
934
+ 0.626657068657750125603941321203
935
+
936
+ (Interestingly, these integrals can still be evaluated if one
937
+ places some other constant than `\pi` in the square root sign.)
938
+
939
+ In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
940
+ the inverse-function distribution `h^{-1}(x)`::
941
+
942
+ >>> mp.dps = 15
943
+ >>> f = lambda x: sin(exp(x))
944
+ >>> quadosc(f, [1,inf], zeros=lambda n: log(n))
945
+ -0.25024394235267
946
+ >>> pi/2-si(e)
947
+ -0.250243942352671
948
+
949
+ **Non-alternating functions**
950
+
951
+ If the integrand oscillates around a positive value, without
952
+ alternating signs, the extrapolation might fail. A simple trick
953
+ that sometimes works is to multiply or divide the frequency by 2::
954
+
955
+ >>> f = lambda x: 1/x**2+sin(x)/x**4
956
+ >>> quadosc(f, [1,inf], omega=1) # Bad
957
+ 1.28642190869861
958
+ >>> quadosc(f, [1,inf], omega=0.5) # Perfect
959
+ 1.28652953559617
960
+ >>> 1+(cos(1)+ci(1)+sin(1))/6
961
+ 1.28652953559617
962
+
963
+ **Fast decay**
964
+
965
+ :func:`~mpmath.quadosc` is primarily useful for slowly decaying
966
+ integrands. If the integrand decreases exponentially or faster,
967
+ :func:`~mpmath.quad` will likely handle it without trouble (and generally be
968
+ much faster than :func:`~mpmath.quadosc`)::
969
+
970
+ >>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
971
+ 0.5
972
+ >>> quad(lambda x: cos(x)/exp(x), [0, inf])
973
+ 0.5
974
+
975
+ """
976
+ a, b = ctx._as_points(interval)
977
+ a = ctx.convert(a)
978
+ b = ctx.convert(b)
979
+ if [omega, period, zeros].count(None) != 2:
980
+ raise ValueError( \
981
+ "must specify exactly one of omega, period, zeros")
982
+ if a == ctx.ninf and b == ctx.inf:
983
+ s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
984
+ s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
985
+ return s1 + s2
986
+ if a == ctx.ninf:
987
+ if zeros:
988
+ return ctx.quadosc(lambda x:f(-x), [-b,-a], lambda n: zeros(-n))
989
+ else:
990
+ return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
991
+ if b != ctx.inf:
992
+ raise ValueError("quadosc requires an infinite integration interval")
993
+ if not zeros:
994
+ if omega:
995
+ period = 2*ctx.pi/omega
996
+ zeros = lambda n: n*period/2
997
+ #for n in range(1,10):
998
+ # p = zeros(n)
999
+ # if p > a:
1000
+ # break
1001
+ #if n >= 9:
1002
+ # raise ValueError("zeros do not appear to be correctly indexed")
1003
+ n = 1
1004
+ s = ctx.quadgl(f, [a, zeros(n)])
1005
+ def term(k):
1006
+ return ctx.quadgl(f, [zeros(k), zeros(k+1)])
1007
+ s += ctx.nsum(term, [n, ctx.inf])
1008
+ return s
1009
+
1010
+ def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs):
1011
+ """
1012
+ Computes the integral of *f* over the interval or path specified
1013
+ by *interval*, using :func:`~mpmath.quad` together with adaptive
1014
+ subdivision of the interval.
1015
+
1016
+ This function gives an accurate answer for some integrals where
1017
+ :func:`~mpmath.quad` fails::
1018
+
1019
+ >>> from mpmath import *
1020
+ >>> mp.dps = 15; mp.pretty = True
1021
+ >>> quad(lambda x: abs(sin(x)), [0, 2*pi])
1022
+ 3.99900894176779
1023
+ >>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
1024
+ 4.0
1025
+ >>> quadsubdiv(sin, [0, 1000])
1026
+ 0.437620923709297
1027
+ >>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
1028
+ 3.12159332021646
1029
+ >>> quadsubdiv(lambda x: ceil(x), [0, 100])
1030
+ 5050.0
1031
+ >>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8])
1032
+ 0.347400172657248
1033
+
1034
+ The argument *maxintervals* can be set to limit the permissible
1035
+ subdivision::
1036
+
1037
+ >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True)
1038
+ (-5.40487904307774, 5.011)
1039
+ >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True)
1040
+ (0.631417921866934, 1.10101120134116e-17)
1041
+
1042
+ Subdivision does not guarantee a correct answer since, the error
1043
+ estimate on subintervals may be inaccurate::
1044
+
1045
+ >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
1046
+ (0.210802735500549, 1.0001111101e-17)
1047
+ >>> mp.dps = 20
1048
+ >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
1049
+ (0.21080273550054927738, 2.200000001e-24)
1050
+
1051
+ The second answer is correct. We can get an accurate result at lower
1052
+ precision by forcing a finer initial subdivision::
1053
+
1054
+ >>> mp.dps = 15
1055
+ >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5))
1056
+ 0.210802735500549
1057
+
1058
+ The following integral is too oscillatory for convergence, but we can get a
1059
+ reasonable estimate::
1060
+
1061
+ >>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True)
1062
+ >>> round(v, 6), round(err, 6)
1063
+ (0.504067, 1e-06)
1064
+ >>> sin(1) - ci(1)
1065
+ 0.504067061906928
1066
+
1067
+ """
1068
+ queue = []
1069
+ for i in range(len(interval)-1):
1070
+ queue.append((interval[i], interval[i+1]))
1071
+ total = ctx.zero
1072
+ total_error = ctx.zero
1073
+ if maxintervals is None:
1074
+ maxintervals = 10 * ctx.prec
1075
+ count = 0
1076
+ quad_args = kwargs.copy()
1077
+ quad_args["verbose"] = False
1078
+ quad_args["error"] = True
1079
+ if tol is None:
1080
+ tol = +ctx.eps
1081
+ orig = ctx.prec
1082
+ try:
1083
+ ctx.prec += 5
1084
+ while queue:
1085
+ a, b = queue.pop()
1086
+ s, err = ctx.quad(f, [a, b], **quad_args)
1087
+ if kwargs.get("verbose"):
1088
+ print("subinterval", count, a, b, err)
1089
+ if err < tol or count > maxintervals:
1090
+ total += s
1091
+ total_error += err
1092
+ else:
1093
+ count += 1
1094
+ if count == maxintervals and kwargs.get("verbose"):
1095
+ print("warning: number of intervals exceeded maxintervals")
1096
+ if a == -ctx.inf and b == ctx.inf:
1097
+ m = 0
1098
+ elif a == -ctx.inf:
1099
+ m = min(b-1, 2*b)
1100
+ elif b == ctx.inf:
1101
+ m = max(a+1, 2*a)
1102
+ else:
1103
+ m = a + (b - a) / 2
1104
+ queue.append((a, m))
1105
+ queue.append((m, b))
1106
+ finally:
1107
+ ctx.prec = orig
1108
+ if kwargs.get("error"):
1109
+ return +total, +total_error
1110
+ else:
1111
+ return +total
1112
+
1113
+ if __name__ == '__main__':
1114
+ import doctest
1115
+ doctest.testmod()
lib/python3.11/site-packages/mpmath/ctx_base.py ADDED
@@ -0,0 +1,494 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from operator import gt, lt
2
+
3
+ from .libmp.backend import xrange
4
+
5
+ from .functions.functions import SpecialFunctions
6
+ from .functions.rszeta import RSCache
7
+ from .calculus.quadrature import QuadratureMethods
8
+ from .calculus.inverselaplace import LaplaceTransformInversionMethods
9
+ from .calculus.calculus import CalculusMethods
10
+ from .calculus.optimization import OptimizationMethods
11
+ from .calculus.odes import ODEMethods
12
+ from .matrices.matrices import MatrixMethods
13
+ from .matrices.calculus import MatrixCalculusMethods
14
+ from .matrices.linalg import LinearAlgebraMethods
15
+ from .matrices.eigen import Eigen
16
+ from .identification import IdentificationMethods
17
+ from .visualization import VisualizationMethods
18
+
19
+ from . import libmp
20
+
21
+ class Context(object):
22
+ pass
23
+
24
+ class StandardBaseContext(Context,
25
+ SpecialFunctions,
26
+ RSCache,
27
+ QuadratureMethods,
28
+ LaplaceTransformInversionMethods,
29
+ CalculusMethods,
30
+ MatrixMethods,
31
+ MatrixCalculusMethods,
32
+ LinearAlgebraMethods,
33
+ Eigen,
34
+ IdentificationMethods,
35
+ OptimizationMethods,
36
+ ODEMethods,
37
+ VisualizationMethods):
38
+
39
+ NoConvergence = libmp.NoConvergence
40
+ ComplexResult = libmp.ComplexResult
41
+
42
+ def __init__(ctx):
43
+ ctx._aliases = {}
44
+ # Call those that need preinitialization (e.g. for wrappers)
45
+ SpecialFunctions.__init__(ctx)
46
+ RSCache.__init__(ctx)
47
+ QuadratureMethods.__init__(ctx)
48
+ LaplaceTransformInversionMethods.__init__(ctx)
49
+ CalculusMethods.__init__(ctx)
50
+ MatrixMethods.__init__(ctx)
51
+
52
+ def _init_aliases(ctx):
53
+ for alias, value in ctx._aliases.items():
54
+ try:
55
+ setattr(ctx, alias, getattr(ctx, value))
56
+ except AttributeError:
57
+ pass
58
+
59
+ _fixed_precision = False
60
+
61
+ # XXX
62
+ verbose = False
63
+
64
+ def warn(ctx, msg):
65
+ print("Warning:", msg)
66
+
67
+ def bad_domain(ctx, msg):
68
+ raise ValueError(msg)
69
+
70
+ def _re(ctx, x):
71
+ if hasattr(x, "real"):
72
+ return x.real
73
+ return x
74
+
75
+ def _im(ctx, x):
76
+ if hasattr(x, "imag"):
77
+ return x.imag
78
+ return ctx.zero
79
+
80
+ def _as_points(ctx, x):
81
+ return x
82
+
83
+ def fneg(ctx, x, **kwargs):
84
+ return -ctx.convert(x)
85
+
86
+ def fadd(ctx, x, y, **kwargs):
87
+ return ctx.convert(x)+ctx.convert(y)
88
+
89
+ def fsub(ctx, x, y, **kwargs):
90
+ return ctx.convert(x)-ctx.convert(y)
91
+
92
+ def fmul(ctx, x, y, **kwargs):
93
+ return ctx.convert(x)*ctx.convert(y)
94
+
95
+ def fdiv(ctx, x, y, **kwargs):
96
+ return ctx.convert(x)/ctx.convert(y)
97
+
98
+ def fsum(ctx, args, absolute=False, squared=False):
99
+ if absolute:
100
+ if squared:
101
+ return sum((abs(x)**2 for x in args), ctx.zero)
102
+ return sum((abs(x) for x in args), ctx.zero)
103
+ if squared:
104
+ return sum((x**2 for x in args), ctx.zero)
105
+ return sum(args, ctx.zero)
106
+
107
+ def fdot(ctx, xs, ys=None, conjugate=False):
108
+ if ys is not None:
109
+ xs = zip(xs, ys)
110
+ if conjugate:
111
+ cf = ctx.conj
112
+ return sum((x*cf(y) for (x,y) in xs), ctx.zero)
113
+ else:
114
+ return sum((x*y for (x,y) in xs), ctx.zero)
115
+
116
+ def fprod(ctx, args):
117
+ prod = ctx.one
118
+ for arg in args:
119
+ prod *= arg
120
+ return prod
121
+
122
+ def nprint(ctx, x, n=6, **kwargs):
123
+ """
124
+ Equivalent to ``print(nstr(x, n))``.
125
+ """
126
+ print(ctx.nstr(x, n, **kwargs))
127
+
128
+ def chop(ctx, x, tol=None):
129
+ """
130
+ Chops off small real or imaginary parts, or converts
131
+ numbers close to zero to exact zeros. The input can be a
132
+ single number or an iterable::
133
+
134
+ >>> from mpmath import *
135
+ >>> mp.dps = 15; mp.pretty = False
136
+ >>> chop(5+1e-10j, tol=1e-9)
137
+ mpf('5.0')
138
+ >>> nprint(chop([1.0, 1e-20, 3+1e-18j, -4, 2]))
139
+ [1.0, 0.0, 3.0, -4.0, 2.0]
140
+
141
+ The tolerance defaults to ``100*eps``.
142
+ """
143
+ if tol is None:
144
+ tol = 100*ctx.eps
145
+ try:
146
+ x = ctx.convert(x)
147
+ absx = abs(x)
148
+ if abs(x) < tol:
149
+ return ctx.zero
150
+ if ctx._is_complex_type(x):
151
+ #part_tol = min(tol, absx*tol)
152
+ part_tol = max(tol, absx*tol)
153
+ if abs(x.imag) < part_tol:
154
+ return x.real
155
+ if abs(x.real) < part_tol:
156
+ return ctx.mpc(0, x.imag)
157
+ except TypeError:
158
+ if isinstance(x, ctx.matrix):
159
+ return x.apply(lambda a: ctx.chop(a, tol))
160
+ if hasattr(x, "__iter__"):
161
+ return [ctx.chop(a, tol) for a in x]
162
+ return x
163
+
164
+ def almosteq(ctx, s, t, rel_eps=None, abs_eps=None):
165
+ r"""
166
+ Determine whether the difference between `s` and `t` is smaller
167
+ than a given epsilon, either relatively or absolutely.
168
+
169
+ Both a maximum relative difference and a maximum difference
170
+ ('epsilons') may be specified. The absolute difference is
171
+ defined as `|s-t|` and the relative difference is defined
172
+ as `|s-t|/\max(|s|, |t|)`.
173
+
174
+ If only one epsilon is given, both are set to the same value.
175
+ If none is given, both epsilons are set to `2^{-p+m}` where
176
+ `p` is the current working precision and `m` is a small
177
+ integer. The default setting typically allows :func:`~mpmath.almosteq`
178
+ to be used to check for mathematical equality
179
+ in the presence of small rounding errors.
180
+
181
+ **Examples**
182
+
183
+ >>> from mpmath import *
184
+ >>> mp.dps = 15
185
+ >>> almosteq(3.141592653589793, 3.141592653589790)
186
+ True
187
+ >>> almosteq(3.141592653589793, 3.141592653589700)
188
+ False
189
+ >>> almosteq(3.141592653589793, 3.141592653589700, 1e-10)
190
+ True
191
+ >>> almosteq(1e-20, 2e-20)
192
+ True
193
+ >>> almosteq(1e-20, 2e-20, rel_eps=0, abs_eps=0)
194
+ False
195
+
196
+ """
197
+ t = ctx.convert(t)
198
+ if abs_eps is None and rel_eps is None:
199
+ rel_eps = abs_eps = ctx.ldexp(1, -ctx.prec+4)
200
+ if abs_eps is None:
201
+ abs_eps = rel_eps
202
+ elif rel_eps is None:
203
+ rel_eps = abs_eps
204
+ diff = abs(s-t)
205
+ if diff <= abs_eps:
206
+ return True
207
+ abss = abs(s)
208
+ abst = abs(t)
209
+ if abss < abst:
210
+ err = diff/abst
211
+ else:
212
+ err = diff/abss
213
+ return err <= rel_eps
214
+
215
+ def arange(ctx, *args):
216
+ r"""
217
+ This is a generalized version of Python's :func:`~mpmath.range` function
218
+ that accepts fractional endpoints and step sizes and
219
+ returns a list of ``mpf`` instances. Like :func:`~mpmath.range`,
220
+ :func:`~mpmath.arange` can be called with 1, 2 or 3 arguments:
221
+
222
+ ``arange(b)``
223
+ `[0, 1, 2, \ldots, x]`
224
+ ``arange(a, b)``
225
+ `[a, a+1, a+2, \ldots, x]`
226
+ ``arange(a, b, h)``
227
+ `[a, a+h, a+h, \ldots, x]`
228
+
229
+ where `b-1 \le x < b` (in the third case, `b-h \le x < b`).
230
+
231
+ Like Python's :func:`~mpmath.range`, the endpoint is not included. To
232
+ produce ranges where the endpoint is included, :func:`~mpmath.linspace`
233
+ is more convenient.
234
+
235
+ **Examples**
236
+
237
+ >>> from mpmath import *
238
+ >>> mp.dps = 15; mp.pretty = False
239
+ >>> arange(4)
240
+ [mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0')]
241
+ >>> arange(1, 2, 0.25)
242
+ [mpf('1.0'), mpf('1.25'), mpf('1.5'), mpf('1.75')]
243
+ >>> arange(1, -1, -0.75)
244
+ [mpf('1.0'), mpf('0.25'), mpf('-0.5')]
245
+
246
+ """
247
+ if not len(args) <= 3:
248
+ raise TypeError('arange expected at most 3 arguments, got %i'
249
+ % len(args))
250
+ if not len(args) >= 1:
251
+ raise TypeError('arange expected at least 1 argument, got %i'
252
+ % len(args))
253
+ # set default
254
+ a = 0
255
+ dt = 1
256
+ # interpret arguments
257
+ if len(args) == 1:
258
+ b = args[0]
259
+ elif len(args) >= 2:
260
+ a = args[0]
261
+ b = args[1]
262
+ if len(args) == 3:
263
+ dt = args[2]
264
+ a, b, dt = ctx.mpf(a), ctx.mpf(b), ctx.mpf(dt)
265
+ assert a + dt != a, 'dt is too small and would cause an infinite loop'
266
+ # adapt code for sign of dt
267
+ if a > b:
268
+ if dt > 0:
269
+ return []
270
+ op = gt
271
+ else:
272
+ if dt < 0:
273
+ return []
274
+ op = lt
275
+ # create list
276
+ result = []
277
+ i = 0
278
+ t = a
279
+ while 1:
280
+ t = a + dt*i
281
+ i += 1
282
+ if op(t, b):
283
+ result.append(t)
284
+ else:
285
+ break
286
+ return result
287
+
288
+ def linspace(ctx, *args, **kwargs):
289
+ """
290
+ ``linspace(a, b, n)`` returns a list of `n` evenly spaced
291
+ samples from `a` to `b`. The syntax ``linspace(mpi(a,b), n)``
292
+ is also valid.
293
+
294
+ This function is often more convenient than :func:`~mpmath.arange`
295
+ for partitioning an interval into subintervals, since
296
+ the endpoint is included::
297
+
298
+ >>> from mpmath import *
299
+ >>> mp.dps = 15; mp.pretty = False
300
+ >>> linspace(1, 4, 4)
301
+ [mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')]
302
+
303
+ You may also provide the keyword argument ``endpoint=False``::
304
+
305
+ >>> linspace(1, 4, 4, endpoint=False)
306
+ [mpf('1.0'), mpf('1.75'), mpf('2.5'), mpf('3.25')]
307
+
308
+ """
309
+ if len(args) == 3:
310
+ a = ctx.mpf(args[0])
311
+ b = ctx.mpf(args[1])
312
+ n = int(args[2])
313
+ elif len(args) == 2:
314
+ assert hasattr(args[0], '_mpi_')
315
+ a = args[0].a
316
+ b = args[0].b
317
+ n = int(args[1])
318
+ else:
319
+ raise TypeError('linspace expected 2 or 3 arguments, got %i' \
320
+ % len(args))
321
+ if n < 1:
322
+ raise ValueError('n must be greater than 0')
323
+ if not 'endpoint' in kwargs or kwargs['endpoint']:
324
+ if n == 1:
325
+ return [ctx.mpf(a)]
326
+ step = (b - a) / ctx.mpf(n - 1)
327
+ y = [i*step + a for i in xrange(n)]
328
+ y[-1] = b
329
+ else:
330
+ step = (b - a) / ctx.mpf(n)
331
+ y = [i*step + a for i in xrange(n)]
332
+ return y
333
+
334
+ def cos_sin(ctx, z, **kwargs):
335
+ return ctx.cos(z, **kwargs), ctx.sin(z, **kwargs)
336
+
337
+ def cospi_sinpi(ctx, z, **kwargs):
338
+ return ctx.cospi(z, **kwargs), ctx.sinpi(z, **kwargs)
339
+
340
+ def _default_hyper_maxprec(ctx, p):
341
+ return int(1000 * p**0.25 + 4*p)
342
+
343
+ _gcd = staticmethod(libmp.gcd)
344
+ list_primes = staticmethod(libmp.list_primes)
345
+ isprime = staticmethod(libmp.isprime)
346
+ bernfrac = staticmethod(libmp.bernfrac)
347
+ moebius = staticmethod(libmp.moebius)
348
+ _ifac = staticmethod(libmp.ifac)
349
+ _eulernum = staticmethod(libmp.eulernum)
350
+ _stirling1 = staticmethod(libmp.stirling1)
351
+ _stirling2 = staticmethod(libmp.stirling2)
352
+
353
+ def sum_accurately(ctx, terms, check_step=1):
354
+ prec = ctx.prec
355
+ try:
356
+ extraprec = 10
357
+ while 1:
358
+ ctx.prec = prec + extraprec + 5
359
+ max_mag = ctx.ninf
360
+ s = ctx.zero
361
+ k = 0
362
+ for term in terms():
363
+ s += term
364
+ if (not k % check_step) and term:
365
+ term_mag = ctx.mag(term)
366
+ max_mag = max(max_mag, term_mag)
367
+ sum_mag = ctx.mag(s)
368
+ if sum_mag - term_mag > ctx.prec:
369
+ break
370
+ k += 1
371
+ cancellation = max_mag - sum_mag
372
+ if cancellation != cancellation:
373
+ break
374
+ if cancellation < extraprec or ctx._fixed_precision:
375
+ break
376
+ extraprec += min(ctx.prec, cancellation)
377
+ return s
378
+ finally:
379
+ ctx.prec = prec
380
+
381
+ def mul_accurately(ctx, factors, check_step=1):
382
+ prec = ctx.prec
383
+ try:
384
+ extraprec = 10
385
+ while 1:
386
+ ctx.prec = prec + extraprec + 5
387
+ max_mag = ctx.ninf
388
+ one = ctx.one
389
+ s = one
390
+ k = 0
391
+ for factor in factors():
392
+ s *= factor
393
+ term = factor - one
394
+ if (not k % check_step):
395
+ term_mag = ctx.mag(term)
396
+ max_mag = max(max_mag, term_mag)
397
+ sum_mag = ctx.mag(s-one)
398
+ #if sum_mag - term_mag > ctx.prec:
399
+ # break
400
+ if -term_mag > ctx.prec:
401
+ break
402
+ k += 1
403
+ cancellation = max_mag - sum_mag
404
+ if cancellation != cancellation:
405
+ break
406
+ if cancellation < extraprec or ctx._fixed_precision:
407
+ break
408
+ extraprec += min(ctx.prec, cancellation)
409
+ return s
410
+ finally:
411
+ ctx.prec = prec
412
+
413
+ def power(ctx, x, y):
414
+ r"""Converts `x` and `y` to mpmath numbers and evaluates
415
+ `x^y = \exp(y \log(x))`::
416
+
417
+ >>> from mpmath import *
418
+ >>> mp.dps = 30; mp.pretty = True
419
+ >>> power(2, 0.5)
420
+ 1.41421356237309504880168872421
421
+
422
+ This shows the leading few digits of a large Mersenne prime
423
+ (performing the exact calculation ``2**43112609-1`` and
424
+ displaying the result in Python would be very slow)::
425
+
426
+ >>> power(2, 43112609)-1
427
+ 3.16470269330255923143453723949e+12978188
428
+ """
429
+ return ctx.convert(x) ** ctx.convert(y)
430
+
431
+ def _zeta_int(ctx, n):
432
+ return ctx.zeta(n)
433
+
434
+ def maxcalls(ctx, f, N):
435
+ """
436
+ Return a wrapped copy of *f* that raises ``NoConvergence`` when *f*
437
+ has been called more than *N* times::
438
+
439
+ >>> from mpmath import *
440
+ >>> mp.dps = 15
441
+ >>> f = maxcalls(sin, 10)
442
+ >>> print(sum(f(n) for n in range(10)))
443
+ 1.95520948210738
444
+ >>> f(10) # doctest: +IGNORE_EXCEPTION_DETAIL
445
+ Traceback (most recent call last):
446
+ ...
447
+ NoConvergence: maxcalls: function evaluated 10 times
448
+
449
+ """
450
+ counter = [0]
451
+ def f_maxcalls_wrapped(*args, **kwargs):
452
+ counter[0] += 1
453
+ if counter[0] > N:
454
+ raise ctx.NoConvergence("maxcalls: function evaluated %i times" % N)
455
+ return f(*args, **kwargs)
456
+ return f_maxcalls_wrapped
457
+
458
+ def memoize(ctx, f):
459
+ """
460
+ Return a wrapped copy of *f* that caches computed values, i.e.
461
+ a memoized copy of *f*. Values are only reused if the cached precision
462
+ is equal to or higher than the working precision::
463
+
464
+ >>> from mpmath import *
465
+ >>> mp.dps = 15; mp.pretty = True
466
+ >>> f = memoize(maxcalls(sin, 1))
467
+ >>> f(2)
468
+ 0.909297426825682
469
+ >>> f(2)
470
+ 0.909297426825682
471
+ >>> mp.dps = 25
472
+ >>> f(2) # doctest: +IGNORE_EXCEPTION_DETAIL
473
+ Traceback (most recent call last):
474
+ ...
475
+ NoConvergence: maxcalls: function evaluated 1 times
476
+
477
+ """
478
+ f_cache = {}
479
+ def f_cached(*args, **kwargs):
480
+ if kwargs:
481
+ key = args, tuple(kwargs.items())
482
+ else:
483
+ key = args
484
+ prec = ctx.prec
485
+ if key in f_cache:
486
+ cprec, cvalue = f_cache[key]
487
+ if cprec >= prec:
488
+ return +cvalue
489
+ value = f(*args, **kwargs)
490
+ f_cache[key] = (prec, value)
491
+ return value
492
+ f_cached.__name__ = f.__name__
493
+ f_cached.__doc__ = f.__doc__
494
+ return f_cached
lib/python3.11/site-packages/mpmath/ctx_fp.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .ctx_base import StandardBaseContext
2
+
3
+ import math
4
+ import cmath
5
+ from . import math2
6
+
7
+ from . import function_docs
8
+
9
+ from .libmp import mpf_bernoulli, to_float, int_types
10
+ from . import libmp
11
+
12
+ class FPContext(StandardBaseContext):
13
+ """
14
+ Context for fast low-precision arithmetic (53-bit precision, giving at most
15
+ about 15-digit accuracy), using Python's builtin float and complex.
16
+ """
17
+
18
+ def __init__(ctx):
19
+ StandardBaseContext.__init__(ctx)
20
+
21
+ # Override SpecialFunctions implementation
22
+ ctx.loggamma = math2.loggamma
23
+ ctx._bernoulli_cache = {}
24
+ ctx.pretty = False
25
+
26
+ ctx._init_aliases()
27
+
28
+ _mpq = lambda cls, x: float(x[0])/x[1]
29
+
30
+ NoConvergence = libmp.NoConvergence
31
+
32
+ def _get_prec(ctx): return 53
33
+ def _set_prec(ctx, p): return
34
+ def _get_dps(ctx): return 15
35
+ def _set_dps(ctx, p): return
36
+
37
+ _fixed_precision = True
38
+
39
+ prec = property(_get_prec, _set_prec)
40
+ dps = property(_get_dps, _set_dps)
41
+
42
+ zero = 0.0
43
+ one = 1.0
44
+ eps = math2.EPS
45
+ inf = math2.INF
46
+ ninf = math2.NINF
47
+ nan = math2.NAN
48
+ j = 1j
49
+
50
+ # Called by SpecialFunctions.__init__()
51
+ @classmethod
52
+ def _wrap_specfun(cls, name, f, wrap):
53
+ if wrap:
54
+ def f_wrapped(ctx, *args, **kwargs):
55
+ convert = ctx.convert
56
+ args = [convert(a) for a in args]
57
+ return f(ctx, *args, **kwargs)
58
+ else:
59
+ f_wrapped = f
60
+ f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__)
61
+ setattr(cls, name, f_wrapped)
62
+
63
+ def bernoulli(ctx, n):
64
+ cache = ctx._bernoulli_cache
65
+ if n in cache:
66
+ return cache[n]
67
+ cache[n] = to_float(mpf_bernoulli(n, 53, 'n'), strict=True)
68
+ return cache[n]
69
+
70
+ pi = math2.pi
71
+ e = math2.e
72
+ euler = math2.euler
73
+ sqrt2 = 1.4142135623730950488
74
+ sqrt5 = 2.2360679774997896964
75
+ phi = 1.6180339887498948482
76
+ ln2 = 0.69314718055994530942
77
+ ln10 = 2.302585092994045684
78
+ euler = 0.57721566490153286061
79
+ catalan = 0.91596559417721901505
80
+ khinchin = 2.6854520010653064453
81
+ apery = 1.2020569031595942854
82
+ glaisher = 1.2824271291006226369
83
+
84
+ absmin = absmax = abs
85
+
86
+ def is_special(ctx, x):
87
+ return x - x != 0.0
88
+
89
+ def isnan(ctx, x):
90
+ return x != x
91
+
92
+ def isinf(ctx, x):
93
+ return abs(x) == math2.INF
94
+
95
+ def isnormal(ctx, x):
96
+ if x:
97
+ return x - x == 0.0
98
+ return False
99
+
100
+ def isnpint(ctx, x):
101
+ if type(x) is complex:
102
+ if x.imag:
103
+ return False
104
+ x = x.real
105
+ return x <= 0.0 and round(x) == x
106
+
107
+ mpf = float
108
+ mpc = complex
109
+
110
+ def convert(ctx, x):
111
+ try:
112
+ return float(x)
113
+ except:
114
+ return complex(x)
115
+
116
+ power = staticmethod(math2.pow)
117
+ sqrt = staticmethod(math2.sqrt)
118
+ exp = staticmethod(math2.exp)
119
+ ln = log = staticmethod(math2.log)
120
+ cos = staticmethod(math2.cos)
121
+ sin = staticmethod(math2.sin)
122
+ tan = staticmethod(math2.tan)
123
+ cos_sin = staticmethod(math2.cos_sin)
124
+ acos = staticmethod(math2.acos)
125
+ asin = staticmethod(math2.asin)
126
+ atan = staticmethod(math2.atan)
127
+ cosh = staticmethod(math2.cosh)
128
+ sinh = staticmethod(math2.sinh)
129
+ tanh = staticmethod(math2.tanh)
130
+ gamma = staticmethod(math2.gamma)
131
+ rgamma = staticmethod(math2.rgamma)
132
+ fac = factorial = staticmethod(math2.factorial)
133
+ floor = staticmethod(math2.floor)
134
+ ceil = staticmethod(math2.ceil)
135
+ cospi = staticmethod(math2.cospi)
136
+ sinpi = staticmethod(math2.sinpi)
137
+ cbrt = staticmethod(math2.cbrt)
138
+ _nthroot = staticmethod(math2.nthroot)
139
+ _ei = staticmethod(math2.ei)
140
+ _e1 = staticmethod(math2.e1)
141
+ _zeta = _zeta_int = staticmethod(math2.zeta)
142
+
143
+ # XXX: math2
144
+ def arg(ctx, z):
145
+ z = complex(z)
146
+ return math.atan2(z.imag, z.real)
147
+
148
+ def expj(ctx, x):
149
+ return ctx.exp(ctx.j*x)
150
+
151
+ def expjpi(ctx, x):
152
+ return ctx.exp(ctx.j*ctx.pi*x)
153
+
154
+ ldexp = math.ldexp
155
+ frexp = math.frexp
156
+
157
+ def mag(ctx, z):
158
+ if z:
159
+ return ctx.frexp(abs(z))[1]
160
+ return ctx.ninf
161
+
162
+ def isint(ctx, z):
163
+ if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
164
+ if z.imag:
165
+ return False
166
+ z = z.real
167
+ try:
168
+ return z == int(z)
169
+ except:
170
+ return False
171
+
172
+ def nint_distance(ctx, z):
173
+ if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
174
+ n = round(z.real)
175
+ else:
176
+ n = round(z)
177
+ if n == z:
178
+ return n, ctx.ninf
179
+ return n, ctx.mag(abs(z-n))
180
+
181
+ def _convert_param(ctx, z):
182
+ if type(z) is tuple:
183
+ p, q = z
184
+ return ctx.mpf(p) / q, 'R'
185
+ if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
186
+ intz = int(z.real)
187
+ else:
188
+ intz = int(z)
189
+ if z == intz:
190
+ return intz, 'Z'
191
+ return z, 'R'
192
+
193
+ def _is_real_type(ctx, z):
194
+ return isinstance(z, float) or isinstance(z, int_types)
195
+
196
+ def _is_complex_type(ctx, z):
197
+ return isinstance(z, complex)
198
+
199
+ def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs):
200
+ coeffs = list(coeffs)
201
+ num = range(p)
202
+ den = range(p,p+q)
203
+ tol = ctx.eps
204
+ s = t = 1.0
205
+ k = 0
206
+ while 1:
207
+ for i in num: t *= (coeffs[i]+k)
208
+ for i in den: t /= (coeffs[i]+k)
209
+ k += 1; t /= k; t *= z; s += t
210
+ if abs(t) < tol:
211
+ return s
212
+ if k > maxterms:
213
+ raise ctx.NoConvergence
214
+
215
+ def atan2(ctx, x, y):
216
+ return math.atan2(x, y)
217
+
218
+ def psi(ctx, m, z):
219
+ m = int(m)
220
+ if m == 0:
221
+ return ctx.digamma(z)
222
+ return (-1)**(m+1) * ctx.fac(m) * ctx.zeta(m+1, z)
223
+
224
+ digamma = staticmethod(math2.digamma)
225
+
226
+ def harmonic(ctx, x):
227
+ x = ctx.convert(x)
228
+ if x == 0 or x == 1:
229
+ return x
230
+ return ctx.digamma(x+1) + ctx.euler
231
+
232
+ nstr = str
233
+
234
+ def to_fixed(ctx, x, prec):
235
+ return int(math.ldexp(x, prec))
236
+
237
+ def rand(ctx):
238
+ import random
239
+ return random.random()
240
+
241
+ _erf = staticmethod(math2.erf)
242
+ _erfc = staticmethod(math2.erfc)
243
+
244
+ def sum_accurately(ctx, terms, check_step=1):
245
+ s = ctx.zero
246
+ k = 0
247
+ for term in terms():
248
+ s += term
249
+ if (not k % check_step) and term:
250
+ if abs(term) <= 1e-18*abs(s):
251
+ break
252
+ k += 1
253
+ return s
lib/python3.11/site-packages/mpmath/ctx_iv.py ADDED
@@ -0,0 +1,551 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import operator
2
+
3
+ from . import libmp
4
+
5
+ from .libmp.backend import basestring
6
+
7
+ from .libmp import (
8
+ int_types, MPZ_ONE,
9
+ prec_to_dps, dps_to_prec, repr_dps,
10
+ round_floor, round_ceiling,
11
+ fzero, finf, fninf, fnan,
12
+ mpf_le, mpf_neg,
13
+ from_int, from_float, from_str, from_rational,
14
+ mpi_mid, mpi_delta, mpi_str,
15
+ mpi_abs, mpi_pos, mpi_neg, mpi_add, mpi_sub,
16
+ mpi_mul, mpi_div, mpi_pow_int, mpi_pow,
17
+ mpi_from_str,
18
+ mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow,
19
+ mpci_abs, mpci_pow, mpci_exp, mpci_log,
20
+ ComplexResult,
21
+ mpf_hash, mpc_hash)
22
+ from .matrices.matrices import _matrix
23
+
24
+ mpi_zero = (fzero, fzero)
25
+
26
+ from .ctx_base import StandardBaseContext
27
+
28
+ new = object.__new__
29
+
30
+ def convert_mpf_(x, prec, rounding):
31
+ if hasattr(x, "_mpf_"): return x._mpf_
32
+ if isinstance(x, int_types): return from_int(x, prec, rounding)
33
+ if isinstance(x, float): return from_float(x, prec, rounding)
34
+ if isinstance(x, basestring): return from_str(x, prec, rounding)
35
+ raise NotImplementedError
36
+
37
+
38
+ class ivmpf(object):
39
+ """
40
+ Interval arithmetic class. Precision is controlled by iv.prec.
41
+ """
42
+
43
+ def __new__(cls, x=0):
44
+ return cls.ctx.convert(x)
45
+
46
+ def cast(self, cls, f_convert):
47
+ a, b = self._mpi_
48
+ if a == b:
49
+ return cls(f_convert(a))
50
+ raise ValueError
51
+
52
+ def __int__(self):
53
+ return self.cast(int, libmp.to_int)
54
+
55
+ def __float__(self):
56
+ return self.cast(float, libmp.to_float)
57
+
58
+ def __complex__(self):
59
+ return self.cast(complex, libmp.to_float)
60
+
61
+ def __hash__(self):
62
+ a, b = self._mpi_
63
+ if a == b:
64
+ return mpf_hash(a)
65
+ else:
66
+ return hash(self._mpi_)
67
+
68
+ @property
69
+ def real(self): return self
70
+
71
+ @property
72
+ def imag(self): return self.ctx.zero
73
+
74
+ def conjugate(self): return self
75
+
76
+ @property
77
+ def a(self):
78
+ a, b = self._mpi_
79
+ return self.ctx.make_mpf((a, a))
80
+
81
+ @property
82
+ def b(self):
83
+ a, b = self._mpi_
84
+ return self.ctx.make_mpf((b, b))
85
+
86
+ @property
87
+ def mid(self):
88
+ ctx = self.ctx
89
+ v = mpi_mid(self._mpi_, ctx.prec)
90
+ return ctx.make_mpf((v, v))
91
+
92
+ @property
93
+ def delta(self):
94
+ ctx = self.ctx
95
+ v = mpi_delta(self._mpi_, ctx.prec)
96
+ return ctx.make_mpf((v,v))
97
+
98
+ @property
99
+ def _mpci_(self):
100
+ return self._mpi_, mpi_zero
101
+
102
+ def _compare(*args):
103
+ raise TypeError("no ordering relation is defined for intervals")
104
+
105
+ __gt__ = _compare
106
+ __le__ = _compare
107
+ __gt__ = _compare
108
+ __ge__ = _compare
109
+
110
+ def __contains__(self, t):
111
+ t = self.ctx.mpf(t)
112
+ return (self.a <= t.a) and (t.b <= self.b)
113
+
114
+ def __str__(self):
115
+ return mpi_str(self._mpi_, self.ctx.prec)
116
+
117
+ def __repr__(self):
118
+ if self.ctx.pretty:
119
+ return str(self)
120
+ a, b = self._mpi_
121
+ n = repr_dps(self.ctx.prec)
122
+ a = libmp.to_str(a, n)
123
+ b = libmp.to_str(b, n)
124
+ return "mpi(%r, %r)" % (a, b)
125
+
126
+ def _compare(s, t, cmpfun):
127
+ if not hasattr(t, "_mpi_"):
128
+ try:
129
+ t = s.ctx.convert(t)
130
+ except:
131
+ return NotImplemented
132
+ return cmpfun(s._mpi_, t._mpi_)
133
+
134
+ def __eq__(s, t): return s._compare(t, libmp.mpi_eq)
135
+ def __ne__(s, t): return s._compare(t, libmp.mpi_ne)
136
+ def __lt__(s, t): return s._compare(t, libmp.mpi_lt)
137
+ def __le__(s, t): return s._compare(t, libmp.mpi_le)
138
+ def __gt__(s, t): return s._compare(t, libmp.mpi_gt)
139
+ def __ge__(s, t): return s._compare(t, libmp.mpi_ge)
140
+
141
+ def __abs__(self):
142
+ return self.ctx.make_mpf(mpi_abs(self._mpi_, self.ctx.prec))
143
+ def __pos__(self):
144
+ return self.ctx.make_mpf(mpi_pos(self._mpi_, self.ctx.prec))
145
+ def __neg__(self):
146
+ return self.ctx.make_mpf(mpi_neg(self._mpi_, self.ctx.prec))
147
+
148
+ def ae(s, t, rel_eps=None, abs_eps=None):
149
+ return s.ctx.almosteq(s, t, rel_eps, abs_eps)
150
+
151
+ class ivmpc(object):
152
+
153
+ def __new__(cls, re=0, im=0):
154
+ re = cls.ctx.convert(re)
155
+ im = cls.ctx.convert(im)
156
+ y = new(cls)
157
+ y._mpci_ = re._mpi_, im._mpi_
158
+ return y
159
+
160
+ def __hash__(self):
161
+ (a, b), (c,d) = self._mpci_
162
+ if a == b and c == d:
163
+ return mpc_hash((a, c))
164
+ else:
165
+ return hash(self._mpci_)
166
+
167
+ def __repr__(s):
168
+ if s.ctx.pretty:
169
+ return str(s)
170
+ return "iv.mpc(%s, %s)" % (repr(s.real), repr(s.imag))
171
+
172
+ def __str__(s):
173
+ return "(%s + %s*j)" % (str(s.real), str(s.imag))
174
+
175
+ @property
176
+ def a(self):
177
+ (a, b), (c,d) = self._mpci_
178
+ return self.ctx.make_mpf((a, a))
179
+
180
+ @property
181
+ def b(self):
182
+ (a, b), (c,d) = self._mpci_
183
+ return self.ctx.make_mpf((b, b))
184
+
185
+ @property
186
+ def c(self):
187
+ (a, b), (c,d) = self._mpci_
188
+ return self.ctx.make_mpf((c, c))
189
+
190
+ @property
191
+ def d(self):
192
+ (a, b), (c,d) = self._mpci_
193
+ return self.ctx.make_mpf((d, d))
194
+
195
+ @property
196
+ def real(s):
197
+ return s.ctx.make_mpf(s._mpci_[0])
198
+
199
+ @property
200
+ def imag(s):
201
+ return s.ctx.make_mpf(s._mpci_[1])
202
+
203
+ def conjugate(s):
204
+ a, b = s._mpci_
205
+ return s.ctx.make_mpc((a, mpf_neg(b)))
206
+
207
+ def overlap(s, t):
208
+ t = s.ctx.convert(t)
209
+ real_overlap = (s.a <= t.a <= s.b) or (s.a <= t.b <= s.b) or (t.a <= s.a <= t.b) or (t.a <= s.b <= t.b)
210
+ imag_overlap = (s.c <= t.c <= s.d) or (s.c <= t.d <= s.d) or (t.c <= s.c <= t.d) or (t.c <= s.d <= t.d)
211
+ return real_overlap and imag_overlap
212
+
213
+ def __contains__(s, t):
214
+ t = s.ctx.convert(t)
215
+ return t.real in s.real and t.imag in s.imag
216
+
217
+ def _compare(s, t, ne=False):
218
+ if not isinstance(t, s.ctx._types):
219
+ try:
220
+ t = s.ctx.convert(t)
221
+ except:
222
+ return NotImplemented
223
+ if hasattr(t, '_mpi_'):
224
+ tval = t._mpi_, mpi_zero
225
+ elif hasattr(t, '_mpci_'):
226
+ tval = t._mpci_
227
+ if ne:
228
+ return s._mpci_ != tval
229
+ return s._mpci_ == tval
230
+
231
+ def __eq__(s, t): return s._compare(t)
232
+ def __ne__(s, t): return s._compare(t, True)
233
+
234
+ def __lt__(s, t): raise TypeError("complex intervals cannot be ordered")
235
+ __le__ = __gt__ = __ge__ = __lt__
236
+
237
+ def __neg__(s): return s.ctx.make_mpc(mpci_neg(s._mpci_, s.ctx.prec))
238
+ def __pos__(s): return s.ctx.make_mpc(mpci_pos(s._mpci_, s.ctx.prec))
239
+ def __abs__(s): return s.ctx.make_mpf(mpci_abs(s._mpci_, s.ctx.prec))
240
+
241
+ def ae(s, t, rel_eps=None, abs_eps=None):
242
+ return s.ctx.almosteq(s, t, rel_eps, abs_eps)
243
+
244
+ def _binary_op(f_real, f_complex):
245
+ def g_complex(ctx, sval, tval):
246
+ return ctx.make_mpc(f_complex(sval, tval, ctx.prec))
247
+ def g_real(ctx, sval, tval):
248
+ try:
249
+ return ctx.make_mpf(f_real(sval, tval, ctx.prec))
250
+ except ComplexResult:
251
+ sval = (sval, mpi_zero)
252
+ tval = (tval, mpi_zero)
253
+ return g_complex(ctx, sval, tval)
254
+ def lop_real(s, t):
255
+ if isinstance(t, _matrix): return NotImplemented
256
+ ctx = s.ctx
257
+ if not isinstance(t, ctx._types): t = ctx.convert(t)
258
+ if hasattr(t, "_mpi_"): return g_real(ctx, s._mpi_, t._mpi_)
259
+ if hasattr(t, "_mpci_"): return g_complex(ctx, (s._mpi_, mpi_zero), t._mpci_)
260
+ return NotImplemented
261
+ def rop_real(s, t):
262
+ ctx = s.ctx
263
+ if not isinstance(t, ctx._types): t = ctx.convert(t)
264
+ if hasattr(t, "_mpi_"): return g_real(ctx, t._mpi_, s._mpi_)
265
+ if hasattr(t, "_mpci_"): return g_complex(ctx, t._mpci_, (s._mpi_, mpi_zero))
266
+ return NotImplemented
267
+ def lop_complex(s, t):
268
+ if isinstance(t, _matrix): return NotImplemented
269
+ ctx = s.ctx
270
+ if not isinstance(t, s.ctx._types):
271
+ try:
272
+ t = s.ctx.convert(t)
273
+ except (ValueError, TypeError):
274
+ return NotImplemented
275
+ return g_complex(ctx, s._mpci_, t._mpci_)
276
+ def rop_complex(s, t):
277
+ ctx = s.ctx
278
+ if not isinstance(t, s.ctx._types):
279
+ t = s.ctx.convert(t)
280
+ return g_complex(ctx, t._mpci_, s._mpci_)
281
+ return lop_real, rop_real, lop_complex, rop_complex
282
+
283
+ ivmpf.__add__, ivmpf.__radd__, ivmpc.__add__, ivmpc.__radd__ = _binary_op(mpi_add, mpci_add)
284
+ ivmpf.__sub__, ivmpf.__rsub__, ivmpc.__sub__, ivmpc.__rsub__ = _binary_op(mpi_sub, mpci_sub)
285
+ ivmpf.__mul__, ivmpf.__rmul__, ivmpc.__mul__, ivmpc.__rmul__ = _binary_op(mpi_mul, mpci_mul)
286
+ ivmpf.__div__, ivmpf.__rdiv__, ivmpc.__div__, ivmpc.__rdiv__ = _binary_op(mpi_div, mpci_div)
287
+ ivmpf.__pow__, ivmpf.__rpow__, ivmpc.__pow__, ivmpc.__rpow__ = _binary_op(mpi_pow, mpci_pow)
288
+
289
+ ivmpf.__truediv__ = ivmpf.__div__; ivmpf.__rtruediv__ = ivmpf.__rdiv__
290
+ ivmpc.__truediv__ = ivmpc.__div__; ivmpc.__rtruediv__ = ivmpc.__rdiv__
291
+
292
+ class ivmpf_constant(ivmpf):
293
+ def __new__(cls, f):
294
+ self = new(cls)
295
+ self._f = f
296
+ return self
297
+ def _get_mpi_(self):
298
+ prec = self.ctx._prec[0]
299
+ a = self._f(prec, round_floor)
300
+ b = self._f(prec, round_ceiling)
301
+ return a, b
302
+ _mpi_ = property(_get_mpi_)
303
+
304
+ class MPIntervalContext(StandardBaseContext):
305
+
306
+ def __init__(ctx):
307
+ ctx.mpf = type('ivmpf', (ivmpf,), {})
308
+ ctx.mpc = type('ivmpc', (ivmpc,), {})
309
+ ctx._types = (ctx.mpf, ctx.mpc)
310
+ ctx._constant = type('ivmpf_constant', (ivmpf_constant,), {})
311
+ ctx._prec = [53]
312
+ ctx._set_prec(53)
313
+ ctx._constant._ctxdata = ctx.mpf._ctxdata = ctx.mpc._ctxdata = [ctx.mpf, new, ctx._prec]
314
+ ctx._constant.ctx = ctx.mpf.ctx = ctx.mpc.ctx = ctx
315
+ ctx.pretty = False
316
+ StandardBaseContext.__init__(ctx)
317
+ ctx._init_builtins()
318
+
319
+ def _mpi(ctx, a, b=None):
320
+ if b is None:
321
+ return ctx.mpf(a)
322
+ return ctx.mpf((a,b))
323
+
324
+ def _init_builtins(ctx):
325
+ ctx.one = ctx.mpf(1)
326
+ ctx.zero = ctx.mpf(0)
327
+ ctx.inf = ctx.mpf('inf')
328
+ ctx.ninf = -ctx.inf
329
+ ctx.nan = ctx.mpf('nan')
330
+ ctx.j = ctx.mpc(0,1)
331
+ ctx.exp = ctx._wrap_mpi_function(libmp.mpi_exp, libmp.mpci_exp)
332
+ ctx.sqrt = ctx._wrap_mpi_function(libmp.mpi_sqrt)
333
+ ctx.ln = ctx._wrap_mpi_function(libmp.mpi_log, libmp.mpci_log)
334
+ ctx.cos = ctx._wrap_mpi_function(libmp.mpi_cos, libmp.mpci_cos)
335
+ ctx.sin = ctx._wrap_mpi_function(libmp.mpi_sin, libmp.mpci_sin)
336
+ ctx.tan = ctx._wrap_mpi_function(libmp.mpi_tan)
337
+ ctx.gamma = ctx._wrap_mpi_function(libmp.mpi_gamma, libmp.mpci_gamma)
338
+ ctx.loggamma = ctx._wrap_mpi_function(libmp.mpi_loggamma, libmp.mpci_loggamma)
339
+ ctx.rgamma = ctx._wrap_mpi_function(libmp.mpi_rgamma, libmp.mpci_rgamma)
340
+ ctx.factorial = ctx._wrap_mpi_function(libmp.mpi_factorial, libmp.mpci_factorial)
341
+ ctx.fac = ctx.factorial
342
+
343
+ ctx.eps = ctx._constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1))
344
+ ctx.pi = ctx._constant(libmp.mpf_pi)
345
+ ctx.e = ctx._constant(libmp.mpf_e)
346
+ ctx.ln2 = ctx._constant(libmp.mpf_ln2)
347
+ ctx.ln10 = ctx._constant(libmp.mpf_ln10)
348
+ ctx.phi = ctx._constant(libmp.mpf_phi)
349
+ ctx.euler = ctx._constant(libmp.mpf_euler)
350
+ ctx.catalan = ctx._constant(libmp.mpf_catalan)
351
+ ctx.glaisher = ctx._constant(libmp.mpf_glaisher)
352
+ ctx.khinchin = ctx._constant(libmp.mpf_khinchin)
353
+ ctx.twinprime = ctx._constant(libmp.mpf_twinprime)
354
+
355
+ def _wrap_mpi_function(ctx, f_real, f_complex=None):
356
+ def g(x, **kwargs):
357
+ if kwargs:
358
+ prec = kwargs.get('prec', ctx._prec[0])
359
+ else:
360
+ prec = ctx._prec[0]
361
+ x = ctx.convert(x)
362
+ if hasattr(x, "_mpi_"):
363
+ return ctx.make_mpf(f_real(x._mpi_, prec))
364
+ if hasattr(x, "_mpci_"):
365
+ return ctx.make_mpc(f_complex(x._mpci_, prec))
366
+ raise ValueError
367
+ return g
368
+
369
+ @classmethod
370
+ def _wrap_specfun(cls, name, f, wrap):
371
+ if wrap:
372
+ def f_wrapped(ctx, *args, **kwargs):
373
+ convert = ctx.convert
374
+ args = [convert(a) for a in args]
375
+ prec = ctx.prec
376
+ try:
377
+ ctx.prec += 10
378
+ retval = f(ctx, *args, **kwargs)
379
+ finally:
380
+ ctx.prec = prec
381
+ return +retval
382
+ else:
383
+ f_wrapped = f
384
+ setattr(cls, name, f_wrapped)
385
+
386
+ def _set_prec(ctx, n):
387
+ ctx._prec[0] = max(1, int(n))
388
+ ctx._dps = prec_to_dps(n)
389
+
390
+ def _set_dps(ctx, n):
391
+ ctx._prec[0] = dps_to_prec(n)
392
+ ctx._dps = max(1, int(n))
393
+
394
+ prec = property(lambda ctx: ctx._prec[0], _set_prec)
395
+ dps = property(lambda ctx: ctx._dps, _set_dps)
396
+
397
+ def make_mpf(ctx, v):
398
+ a = new(ctx.mpf)
399
+ a._mpi_ = v
400
+ return a
401
+
402
+ def make_mpc(ctx, v):
403
+ a = new(ctx.mpc)
404
+ a._mpci_ = v
405
+ return a
406
+
407
+ def _mpq(ctx, pq):
408
+ p, q = pq
409
+ a = libmp.from_rational(p, q, ctx.prec, round_floor)
410
+ b = libmp.from_rational(p, q, ctx.prec, round_ceiling)
411
+ return ctx.make_mpf((a, b))
412
+
413
+ def convert(ctx, x):
414
+ if isinstance(x, (ctx.mpf, ctx.mpc)):
415
+ return x
416
+ if isinstance(x, ctx._constant):
417
+ return +x
418
+ if isinstance(x, complex) or hasattr(x, "_mpc_"):
419
+ re = ctx.convert(x.real)
420
+ im = ctx.convert(x.imag)
421
+ return ctx.mpc(re,im)
422
+ if isinstance(x, basestring):
423
+ v = mpi_from_str(x, ctx.prec)
424
+ return ctx.make_mpf(v)
425
+ if hasattr(x, "_mpi_"):
426
+ a, b = x._mpi_
427
+ else:
428
+ try:
429
+ a, b = x
430
+ except (TypeError, ValueError):
431
+ a = b = x
432
+ if hasattr(a, "_mpi_"):
433
+ a = a._mpi_[0]
434
+ else:
435
+ a = convert_mpf_(a, ctx.prec, round_floor)
436
+ if hasattr(b, "_mpi_"):
437
+ b = b._mpi_[1]
438
+ else:
439
+ b = convert_mpf_(b, ctx.prec, round_ceiling)
440
+ if a == fnan or b == fnan:
441
+ a = fninf
442
+ b = finf
443
+ assert mpf_le(a, b), "endpoints must be properly ordered"
444
+ return ctx.make_mpf((a, b))
445
+
446
+ def nstr(ctx, x, n=5, **kwargs):
447
+ x = ctx.convert(x)
448
+ if hasattr(x, "_mpi_"):
449
+ return libmp.mpi_to_str(x._mpi_, n, **kwargs)
450
+ if hasattr(x, "_mpci_"):
451
+ re = libmp.mpi_to_str(x._mpci_[0], n, **kwargs)
452
+ im = libmp.mpi_to_str(x._mpci_[1], n, **kwargs)
453
+ return "(%s + %s*j)" % (re, im)
454
+
455
+ def mag(ctx, x):
456
+ x = ctx.convert(x)
457
+ if isinstance(x, ctx.mpc):
458
+ return max(ctx.mag(x.real), ctx.mag(x.imag)) + 1
459
+ a, b = libmp.mpi_abs(x._mpi_)
460
+ sign, man, exp, bc = b
461
+ if man:
462
+ return exp+bc
463
+ if b == fzero:
464
+ return ctx.ninf
465
+ if b == fnan:
466
+ return ctx.nan
467
+ return ctx.inf
468
+
469
+ def isnan(ctx, x):
470
+ return False
471
+
472
+ def isinf(ctx, x):
473
+ return x == ctx.inf
474
+
475
+ def isint(ctx, x):
476
+ x = ctx.convert(x)
477
+ a, b = x._mpi_
478
+ if a == b:
479
+ sign, man, exp, bc = a
480
+ if man:
481
+ return exp >= 0
482
+ return a == fzero
483
+ return None
484
+
485
+ def ldexp(ctx, x, n):
486
+ a, b = ctx.convert(x)._mpi_
487
+ a = libmp.mpf_shift(a, n)
488
+ b = libmp.mpf_shift(b, n)
489
+ return ctx.make_mpf((a,b))
490
+
491
+ def absmin(ctx, x):
492
+ return abs(ctx.convert(x)).a
493
+
494
+ def absmax(ctx, x):
495
+ return abs(ctx.convert(x)).b
496
+
497
+ def atan2(ctx, y, x):
498
+ y = ctx.convert(y)._mpi_
499
+ x = ctx.convert(x)._mpi_
500
+ return ctx.make_mpf(libmp.mpi_atan2(y,x,ctx.prec))
501
+
502
+ def _convert_param(ctx, x):
503
+ if isinstance(x, libmp.int_types):
504
+ return x, 'Z'
505
+ if isinstance(x, tuple):
506
+ p, q = x
507
+ return (ctx.mpf(p) / ctx.mpf(q), 'R')
508
+ x = ctx.convert(x)
509
+ if isinstance(x, ctx.mpf):
510
+ return x, 'R'
511
+ if isinstance(x, ctx.mpc):
512
+ return x, 'C'
513
+ raise ValueError
514
+
515
+ def _is_real_type(ctx, z):
516
+ return isinstance(z, ctx.mpf) or isinstance(z, int_types)
517
+
518
+ def _is_complex_type(ctx, z):
519
+ return isinstance(z, ctx.mpc)
520
+
521
+ def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs):
522
+ coeffs = list(coeffs)
523
+ num = range(p)
524
+ den = range(p,p+q)
525
+ #tol = ctx.eps
526
+ s = t = ctx.one
527
+ k = 0
528
+ while 1:
529
+ for i in num: t *= (coeffs[i]+k)
530
+ for i in den: t /= (coeffs[i]+k)
531
+ k += 1; t /= k; t *= z; s += t
532
+ if t == 0:
533
+ return s
534
+ #if abs(t) < tol:
535
+ # return s
536
+ if k > maxterms:
537
+ raise ctx.NoConvergence
538
+
539
+
540
+ # Register with "numbers" ABC
541
+ # We do not subclass, hence we do not use the @abstractmethod checks. While
542
+ # this is less invasive it may turn out that we do not actually support
543
+ # parts of the expected interfaces. See
544
+ # http://docs.python.org/2/library/numbers.html for list of abstract
545
+ # methods.
546
+ try:
547
+ import numbers
548
+ numbers.Complex.register(ivmpc)
549
+ numbers.Real.register(ivmpf)
550
+ except ImportError:
551
+ pass
lib/python3.11/site-packages/mpmath/ctx_mp.py ADDED
@@ -0,0 +1,1339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module defines the mpf, mpc classes, and standard functions for
3
+ operating with them.
4
+ """
5
+ __docformat__ = 'plaintext'
6
+
7
+ import functools
8
+
9
+ import re
10
+
11
+ from .ctx_base import StandardBaseContext
12
+
13
+ from .libmp.backend import basestring, BACKEND
14
+
15
+ from . import libmp
16
+
17
+ from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
18
+ round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
19
+ ComplexResult, to_pickable, from_pickable, normalize,
20
+ from_int, from_float, from_str, to_int, to_float, to_str,
21
+ from_rational, from_man_exp,
22
+ fone, fzero, finf, fninf, fnan,
23
+ mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
24
+ mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
25
+ mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
26
+ mpf_hash, mpf_rand,
27
+ mpf_sum,
28
+ bitcount, to_fixed,
29
+ mpc_to_str,
30
+ mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
31
+ mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
32
+ mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
33
+ mpc_mpf_div,
34
+ mpf_pow,
35
+ mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
36
+ mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
37
+ mpf_glaisher, mpf_twinprime, mpf_mertens,
38
+ int_types)
39
+
40
+ from . import function_docs
41
+ from . import rational
42
+
43
+ new = object.__new__
44
+
45
+ get_complex = re.compile(r'^\(?(?P<re>[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?)??'
46
+ r'(?P<im>[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?j)?\)?$')
47
+
48
+ if BACKEND == 'sage':
49
+ from sage.libs.mpmath.ext_main import Context as BaseMPContext
50
+ # pickle hack
51
+ import sage.libs.mpmath.ext_main as _mpf_module
52
+ else:
53
+ from .ctx_mp_python import PythonMPContext as BaseMPContext
54
+ from . import ctx_mp_python as _mpf_module
55
+
56
+ from .ctx_mp_python import _mpf, _mpc, mpnumeric
57
+
58
+ class MPContext(BaseMPContext, StandardBaseContext):
59
+ """
60
+ Context for multiprecision arithmetic with a global precision.
61
+ """
62
+
63
+ def __init__(ctx):
64
+ BaseMPContext.__init__(ctx)
65
+ ctx.trap_complex = False
66
+ ctx.pretty = False
67
+ ctx.types = [ctx.mpf, ctx.mpc, ctx.constant]
68
+ ctx._mpq = rational.mpq
69
+ ctx.default()
70
+ StandardBaseContext.__init__(ctx)
71
+
72
+ ctx.mpq = rational.mpq
73
+ ctx.init_builtins()
74
+
75
+ ctx.hyp_summators = {}
76
+
77
+ ctx._init_aliases()
78
+
79
+ # XXX: automate
80
+ try:
81
+ ctx.bernoulli.im_func.func_doc = function_docs.bernoulli
82
+ ctx.primepi.im_func.func_doc = function_docs.primepi
83
+ ctx.psi.im_func.func_doc = function_docs.psi
84
+ ctx.atan2.im_func.func_doc = function_docs.atan2
85
+ except AttributeError:
86
+ # python 3
87
+ ctx.bernoulli.__func__.func_doc = function_docs.bernoulli
88
+ ctx.primepi.__func__.func_doc = function_docs.primepi
89
+ ctx.psi.__func__.func_doc = function_docs.psi
90
+ ctx.atan2.__func__.func_doc = function_docs.atan2
91
+
92
+ ctx.digamma.func_doc = function_docs.digamma
93
+ ctx.cospi.func_doc = function_docs.cospi
94
+ ctx.sinpi.func_doc = function_docs.sinpi
95
+
96
+ def init_builtins(ctx):
97
+
98
+ mpf = ctx.mpf
99
+ mpc = ctx.mpc
100
+
101
+ # Exact constants
102
+ ctx.one = ctx.make_mpf(fone)
103
+ ctx.zero = ctx.make_mpf(fzero)
104
+ ctx.j = ctx.make_mpc((fzero,fone))
105
+ ctx.inf = ctx.make_mpf(finf)
106
+ ctx.ninf = ctx.make_mpf(fninf)
107
+ ctx.nan = ctx.make_mpf(fnan)
108
+
109
+ eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1),
110
+ "epsilon of working precision", "eps")
111
+ ctx.eps = eps
112
+
113
+ # Approximate constants
114
+ ctx.pi = ctx.constant(mpf_pi, "pi", "pi")
115
+ ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2")
116
+ ctx.ln10 = ctx.constant(mpf_ln10, "ln(10)", "ln10")
117
+ ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi")
118
+ ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e")
119
+ ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler")
120
+ ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan")
121
+ ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin")
122
+ ctx.glaisher = ctx.constant(mpf_glaisher, "Glaisher's constant", "glaisher")
123
+ ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery")
124
+ ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree")
125
+ ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime")
126
+ ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens")
127
+
128
+ # Standard functions
129
+ ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt)
130
+ ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt)
131
+ ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log)
132
+ ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
133
+ ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp)
134
+ ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj)
135
+ ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi)
136
+ ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin)
137
+ ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos)
138
+ ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan)
139
+ ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh)
140
+ ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh)
141
+ ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh)
142
+ ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin)
143
+ ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos)
144
+ ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
145
+ ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh)
146
+ ctx.acosh = ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh)
147
+ ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh)
148
+ ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi)
149
+ ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, libmp.mpc_cos_pi)
150
+ ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor)
151
+ ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil)
152
+ ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint)
153
+ ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac)
154
+ ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci)
155
+
156
+ ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma)
157
+ ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma)
158
+ ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma)
159
+ ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial)
160
+
161
+ ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0)
162
+ ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic)
163
+ ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei)
164
+ ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1)
165
+ ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci)
166
+ ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si)
167
+ ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk)
168
+ ctx._ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe)
169
+ ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1)
170
+ ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None)
171
+ ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None)
172
+ ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta)
173
+ ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta)
174
+
175
+ # Faster versions
176
+ ctx.sqrt = getattr(ctx, "_sage_sqrt", ctx.sqrt)
177
+ ctx.exp = getattr(ctx, "_sage_exp", ctx.exp)
178
+ ctx.ln = getattr(ctx, "_sage_ln", ctx.ln)
179
+ ctx.cos = getattr(ctx, "_sage_cos", ctx.cos)
180
+ ctx.sin = getattr(ctx, "_sage_sin", ctx.sin)
181
+
182
    def to_fixed(ctx, x, prec):
        # Convert x to a fixed-point integer scaled by 2**prec, delegating
        # to the number's own to_fixed method (x must provide one, e.g. mpf).
        return x.to_fixed(prec)
185
+ def hypot(ctx, x, y):
186
+ r"""
187
+ Computes the Euclidean norm of the vector `(x, y)`, equal
188
+ to `\sqrt{x^2 + y^2}`. Both `x` and `y` must be real."""
189
+ x = ctx.convert(x)
190
+ y = ctx.convert(y)
191
+ return ctx.make_mpf(libmp.mpf_hypot(x._mpf_, y._mpf_, *ctx._prec_rounding))
192
+
193
+ def _gamma_upper_int(ctx, n, z):
194
+ n = int(ctx._re(n))
195
+ if n == 0:
196
+ return ctx.e1(z)
197
+ if not hasattr(z, '_mpf_'):
198
+ raise NotImplementedError
199
+ prec, rounding = ctx._prec_rounding
200
+ real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding, gamma=True)
201
+ if imag is None:
202
+ return ctx.make_mpf(real)
203
+ else:
204
+ return ctx.make_mpc((real, imag))
205
+
206
+ def _expint_int(ctx, n, z):
207
+ n = int(n)
208
+ if n == 1:
209
+ return ctx.e1(z)
210
+ if not hasattr(z, '_mpf_'):
211
+ raise NotImplementedError
212
+ prec, rounding = ctx._prec_rounding
213
+ real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding)
214
+ if imag is None:
215
+ return ctx.make_mpf(real)
216
+ else:
217
+ return ctx.make_mpc((real, imag))
218
+
219
+ def _nthroot(ctx, x, n):
220
+ if hasattr(x, '_mpf_'):
221
+ try:
222
+ return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding))
223
+ except ComplexResult:
224
+ if ctx.trap_complex:
225
+ raise
226
+ x = (x._mpf_, libmp.fzero)
227
+ else:
228
+ x = x._mpc_
229
+ return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding))
230
+
231
+ def _besselj(ctx, n, z):
232
+ prec, rounding = ctx._prec_rounding
233
+ if hasattr(z, '_mpf_'):
234
+ return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding))
235
+ elif hasattr(z, '_mpc_'):
236
+ return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding))
237
+
238
+ def _agm(ctx, a, b=1):
239
+ prec, rounding = ctx._prec_rounding
240
+ if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'):
241
+ try:
242
+ v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding)
243
+ return ctx.make_mpf(v)
244
+ except ComplexResult:
245
+ pass
246
+ if hasattr(a, '_mpf_'): a = (a._mpf_, libmp.fzero)
247
+ else: a = a._mpc_
248
+ if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero)
249
+ else: b = b._mpc_
250
+ return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding))
251
+
252
    def bernoulli(ctx, n):
        # Bernoulli number B_n as an mpf; detailed docs are attached to this
        # method from function_docs in the context constructor.
        return ctx.make_mpf(libmp.mpf_bernoulli(int(n), *ctx._prec_rounding))
255
    def _zeta_int(ctx, n):
        # Riemann zeta at an integer argument (fast special-case path).
        return ctx.make_mpf(libmp.mpf_zeta_int(int(n), *ctx._prec_rounding))
258
+ def atan2(ctx, y, x):
259
+ x = ctx.convert(x)
260
+ y = ctx.convert(y)
261
+ return ctx.make_mpf(libmp.mpf_atan2(y._mpf_, x._mpf_, *ctx._prec_rounding))
262
+
263
+ def psi(ctx, m, z):
264
+ z = ctx.convert(z)
265
+ m = int(m)
266
+ if ctx._is_real_type(z):
267
+ return ctx.make_mpf(libmp.mpf_psi(m, z._mpf_, *ctx._prec_rounding))
268
+ else:
269
+ return ctx.make_mpc(libmp.mpc_psi(m, z._mpc_, *ctx._prec_rounding))
270
+
271
+ def cos_sin(ctx, x, **kwargs):
272
+ if type(x) not in ctx.types:
273
+ x = ctx.convert(x)
274
+ prec, rounding = ctx._parse_prec(kwargs)
275
+ if hasattr(x, '_mpf_'):
276
+ c, s = libmp.mpf_cos_sin(x._mpf_, prec, rounding)
277
+ return ctx.make_mpf(c), ctx.make_mpf(s)
278
+ elif hasattr(x, '_mpc_'):
279
+ c, s = libmp.mpc_cos_sin(x._mpc_, prec, rounding)
280
+ return ctx.make_mpc(c), ctx.make_mpc(s)
281
+ else:
282
+ return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
283
+
284
+ def cospi_sinpi(ctx, x, **kwargs):
285
+ if type(x) not in ctx.types:
286
+ x = ctx.convert(x)
287
+ prec, rounding = ctx._parse_prec(kwargs)
288
+ if hasattr(x, '_mpf_'):
289
+ c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding)
290
+ return ctx.make_mpf(c), ctx.make_mpf(s)
291
+ elif hasattr(x, '_mpc_'):
292
+ c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding)
293
+ return ctx.make_mpc(c), ctx.make_mpc(s)
294
+ else:
295
+ return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
296
+
297
+ def clone(ctx):
298
+ """
299
+ Create a copy of the context, with the same working precision.
300
+ """
301
+ a = ctx.__class__()
302
+ a.prec = ctx.prec
303
+ return a
304
+
305
+ # Several helper methods
306
+ # TODO: add more of these, make consistent, write docstrings, ...
307
+
308
+ def _is_real_type(ctx, x):
309
+ if hasattr(x, '_mpc_') or type(x) is complex:
310
+ return False
311
+ return True
312
+
313
+ def _is_complex_type(ctx, x):
314
+ if hasattr(x, '_mpc_') or type(x) is complex:
315
+ return True
316
+ return False
317
+
318
+ def isnan(ctx, x):
319
+ """
320
+ Return *True* if *x* is a NaN (not-a-number), or for a complex
321
+ number, whether either the real or complex part is NaN;
322
+ otherwise return *False*::
323
+
324
+ >>> from mpmath import *
325
+ >>> isnan(3.14)
326
+ False
327
+ >>> isnan(nan)
328
+ True
329
+ >>> isnan(mpc(3.14,2.72))
330
+ False
331
+ >>> isnan(mpc(3.14,nan))
332
+ True
333
+
334
+ """
335
+ if hasattr(x, "_mpf_"):
336
+ return x._mpf_ == fnan
337
+ if hasattr(x, "_mpc_"):
338
+ return fnan in x._mpc_
339
+ if isinstance(x, int_types) or isinstance(x, rational.mpq):
340
+ return False
341
+ x = ctx.convert(x)
342
+ if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
343
+ return ctx.isnan(x)
344
+ raise TypeError("isnan() needs a number as input")
345
+
346
+ def isfinite(ctx, x):
347
+ """
348
+ Return *True* if *x* is a finite number, i.e. neither
349
+ an infinity or a NaN.
350
+
351
+ >>> from mpmath import *
352
+ >>> isfinite(inf)
353
+ False
354
+ >>> isfinite(-inf)
355
+ False
356
+ >>> isfinite(3)
357
+ True
358
+ >>> isfinite(nan)
359
+ False
360
+ >>> isfinite(3+4j)
361
+ True
362
+ >>> isfinite(mpc(3,inf))
363
+ False
364
+ >>> isfinite(mpc(nan,3))
365
+ False
366
+
367
+ """
368
+ if ctx.isinf(x) or ctx.isnan(x):
369
+ return False
370
+ return True
371
+
372
+ def isnpint(ctx, x):
373
+ """
374
+ Determine if *x* is a nonpositive integer.
375
+ """
376
+ if not x:
377
+ return True
378
+ if hasattr(x, '_mpf_'):
379
+ sign, man, exp, bc = x._mpf_
380
+ return sign and exp >= 0
381
+ if hasattr(x, '_mpc_'):
382
+ return not x.imag and ctx.isnpint(x.real)
383
+ if type(x) in int_types:
384
+ return x <= 0
385
+ if isinstance(x, ctx.mpq):
386
+ p, q = x._mpq_
387
+ if not p:
388
+ return True
389
+ return q == 1 and p <= 0
390
+ return ctx.isnpint(ctx.convert(x))
391
+
392
+ def __str__(ctx):
393
+ lines = ["Mpmath settings:",
394
+ (" mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]",
395
+ (" mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]",
396
+ (" mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]",
397
+ ]
398
+ return "\n".join(lines)
399
+
400
    @property
    def _repr_digits(ctx):
        # Decimal digits used by repr(); repr_dps gives slightly more than
        # dps so that repr -> eval round-trips exactly.
        return repr_dps(ctx._prec)
404
    @property
    def _str_digits(ctx):
        # Decimal digits used by str(): the working decimal precision.
        return ctx._dps
408
    def extraprec(ctx, n, normalize_output=False):
        """
        The block

            with extraprec(n):
                <code>

        increases the precision n bits, executes <code>, and then
        restores the precision.

        extraprec(n)(f) returns a decorated version of the function f
        that increases the working precision by n bits before execution,
        and restores the parent precision afterwards. With
        normalize_output=True, it rounds the return value to the parent
        precision.
        """
        # PrecisionManager works both as a context manager and a decorator.
        return PrecisionManager(ctx, lambda p: p + n, None, normalize_output)
426
    def extradps(ctx, n, normalize_output=False):
        """
        This function is analogous to extraprec (see documentation)
        but changes the decimal precision instead of the number of bits.
        """
        # PrecisionManager works both as a context manager and a decorator.
        return PrecisionManager(ctx, None, lambda d: d + n, normalize_output)
433
    def workprec(ctx, n, normalize_output=False):
        """
        The block

            with workprec(n):
                <code>

        sets the precision to n bits, executes <code>, and then restores
        the precision.

        workprec(n)(f) returns a decorated version of the function f
        that sets the precision to n bits before execution,
        and restores the precision afterwards. With normalize_output=True,
        it rounds the return value to the parent precision.
        """
        # Unlike extraprec, this sets an absolute precision (ignores parent).
        return PrecisionManager(ctx, lambda p: n, None, normalize_output)
450
    def workdps(ctx, n, normalize_output=False):
        """
        This function is analogous to workprec (see documentation)
        but changes the decimal precision instead of the number of bits.
        """
        # Sets an absolute decimal precision for the managed block/function.
        return PrecisionManager(ctx, None, lambda d: n, normalize_output)
457
    def autoprec(ctx, f, maxprec=None, catch=(), verbose=False):
        r"""
        Return a wrapped copy of *f* that repeatedly evaluates *f*
        with increasing precision until the result converges to the
        full precision used at the point of the call.

        This heuristically protects against rounding errors, at the cost of
        roughly a 2x slowdown compared to manually setting the optimal
        precision. This method can, however, easily be fooled if the results
        from *f* depend "discontinuously" on the precision, for instance
        if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec`
        should be used judiciously.

        **Examples**

        Many functions are sensitive to perturbations of the input arguments.
        If the arguments are decimal numbers, they may have to be converted
        to binary at a much higher precision. If the amount of required
        extra precision is unknown, :func:`~mpmath.autoprec` is convenient::

            >>> from mpmath import *
            >>> mp.dps = 15
            >>> mp.pretty = True
            >>> besselj(5, 125 * 10**28) # Exact input
            -8.03284785591801e-17
            >>> besselj(5, '1.25e30')   # Bad
            7.12954868316652e-16
            >>> autoprec(besselj)(5, '1.25e30')   # Good
            -8.03284785591801e-17

        The following fails to converge because `\sin(\pi) = 0` whereas all
        finite-precision approximations of `\pi` give nonzero values::

            >>> autoprec(sin)(pi) # doctest: +IGNORE_EXCEPTION_DETAIL
            Traceback (most recent call last):
              ...
            NoConvergence: autoprec: prec increased to 2910 without convergence

        As the following example shows, :func:`~mpmath.autoprec` can protect against
        cancellation, but is fooled by too severe cancellation::

            >>> x = 1e-10
            >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
            1.00000008274037e-10
            1.00000000005e-10
            1.00000000005e-10
            >>> x = 1e-50
            >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
            0.0
            1.0e-50
            0.0

        With *catch*, an exception or list of exceptions to intercept
        may be specified. The raised exception is interpreted
        as signaling insufficient precision. This permits, for example,
        evaluating a function where a too low precision results in a
        division by zero::

            >>> f = lambda x: 1/(exp(x)-1)
            >>> f(1e-30)
            Traceback (most recent call last):
              ...
            ZeroDivisionError
            >>> autoprec(f, catch=ZeroDivisionError)(1e-30)
            1.0e+30

        """
        def f_autoprec_wrapped(*args, **kwargs):
            # Remember the caller's precision; it is the convergence target
            # and is restored (via finally) no matter how we exit.
            prec = ctx.prec
            if maxprec is None:
                maxprec2 = ctx._default_hyper_maxprec(prec)
            else:
                maxprec2 = maxprec
            try:
                # First evaluation at slightly raised precision
                ctx.prec = prec + 10
                try:
                    v1 = f(*args, **kwargs)
                except catch:
                    # A caught exception is treated as "not enough precision"
                    v1 = ctx.nan
                prec2 = prec + 20
                while 1:
                    ctx.prec = prec2
                    try:
                        v2 = f(*args, **kwargs)
                    except catch:
                        v2 = ctx.nan
                    # Converged exactly
                    if v1 == v2:
                        break
                    # Converged to within the target precision:
                    # relative error below 2**(-prec)
                    err = ctx.mag(v2-v1) - ctx.mag(v2)
                    if err < (-prec):
                        break
                    if verbose:
                        print("autoprec: target=%s, prec=%s, accuracy=%s" \
                            % (prec, prec2, -err))
                    v1 = v2
                    if prec2 >= maxprec2:
                        raise ctx.NoConvergence(\
                            "autoprec: prec increased to %i without convergence"\
                            % prec2)
                    # Triple the working precision each round, capped at max
                    prec2 += int(prec2*2)
                    prec2 = min(prec2, maxprec2)
            finally:
                ctx.prec = prec
            # Unary + rounds the result to the restored (caller) precision
            return +v2
        return f_autoprec_wrapped
564
    def nstr(ctx, x, n=6, **kwargs):
        """
        Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n*
        significant digits. The small default value for *n* is chosen to
        make this function useful for printing collections of numbers
        (lists, matrices, etc).

        If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively
        to each element. For unrecognized classes, :func:`~mpmath.nstr`
        simply returns ``str(x)``.

        The companion function :func:`~mpmath.nprint` prints the result
        instead of returning it.

        The keyword arguments *strip_zeros*, *min_fixed*, *max_fixed*
        and *show_zero_exponent* are forwarded to :func:`~mpmath.libmp.to_str`.

        The number will be printed in fixed-point format if the position
        of the leading digit is strictly between min_fixed
        (default = min(-dps/3,-5)) and max_fixed (default = dps).

        To force fixed-point format always, set min_fixed = -inf,
        max_fixed = +inf. To force floating-point format, set
        min_fixed >= max_fixed.

            >>> from mpmath import *
            >>> nstr([+pi, ldexp(1,-500)])
            '[3.14159, 3.05494e-151]'
            >>> nprint([+pi, ldexp(1,-500)])
            [3.14159, 3.05494e-151]
            >>> nstr(mpf("5e-10"), 5)
            '5.0e-10'
            >>> nstr(mpf("5e-10"), 5, strip_zeros=False)
            '5.0000e-10'
            >>> nstr(mpf("5e-10"), 5, strip_zeros=False, min_fixed=-11)
            '0.00000000050000'
            >>> nstr(mpf(0), 5, show_zero_exponent=True)
            '0.0e+0'

        """
        # Containers: recurse element-wise, preserving the bracket style
        if isinstance(x, list):
            return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
        if isinstance(x, tuple):
            return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
        if hasattr(x, '_mpf_'):
            return to_str(x._mpf_, n, **kwargs)
        if hasattr(x, '_mpc_'):
            return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")"
        # Strings are repr'd so the quotes are visible in printed collections
        if isinstance(x, basestring):
            return repr(x)
        if isinstance(x, ctx.matrix):
            return x.__nstr__(n, **kwargs)
        return str(x)
618
+ def _convert_fallback(ctx, x, strings):
619
+ if strings and isinstance(x, basestring):
620
+ if 'j' in x.lower():
621
+ x = x.lower().replace(' ', '')
622
+ match = get_complex.match(x)
623
+ re = match.group('re')
624
+ if not re:
625
+ re = 0
626
+ im = match.group('im').rstrip('j')
627
+ return ctx.mpc(ctx.convert(re), ctx.convert(im))
628
+ if hasattr(x, "_mpi_"):
629
+ a, b = x._mpi_
630
+ if a == b:
631
+ return ctx.make_mpf(a)
632
+ else:
633
+ raise ValueError("can only create mpf from zero-width interval")
634
+ raise TypeError("cannot create mpf from " + repr(x))
635
+
636
    def mpmathify(ctx, *args, **kwargs):
        # Public alias for convert(): coerce the argument to an mpmath number.
        return ctx.convert(*args, **kwargs)
639
+ def _parse_prec(ctx, kwargs):
640
+ if kwargs:
641
+ if kwargs.get('exact'):
642
+ return 0, 'f'
643
+ prec, rounding = ctx._prec_rounding
644
+ if 'rounding' in kwargs:
645
+ rounding = kwargs['rounding']
646
+ if 'prec' in kwargs:
647
+ prec = kwargs['prec']
648
+ if prec == ctx.inf:
649
+ return 0, 'f'
650
+ else:
651
+ prec = int(prec)
652
+ elif 'dps' in kwargs:
653
+ dps = kwargs['dps']
654
+ if dps == ctx.inf:
655
+ return 0, 'f'
656
+ prec = dps_to_prec(dps)
657
+ return prec, rounding
658
+ return ctx._prec_rounding
659
+
660
    # Message used when an exact (infinite-precision) operation overflows.
    _exact_overflow_msg = "the exact result does not fit in memory"

    # Error template for hypsum() convergence failure; filled with the
    # target accuracy (bits) and the attempted working precision (bits).
    _hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy
using a working precision of %i bits. Try with a higher maxprec,
maxterms, or set zeroprec."""
666
    def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs):
        """Sum a generalized hypergeometric series pFq(coeffs; z).

        *flags* describes the type of each coefficient ('Z' = integer, etc.);
        a compiled summator is generated per (p, q, flags, real/complex)
        signature and cached in ctx.hyp_summators. The working precision is
        raised and the sum retried until cancellation and near-integer-pole
        "magnitude jumps" are resolved, or *maxprec* is exceeded.
        Raises ZeroDivisionError for a true pole, ValueError on
        non-convergence. NOTE(review): if z is neither mpf nor mpc this
        raises NameError on `key` — callers appear to guarantee the type.
        """
        if hasattr(z, "_mpf_"):
            key = p, q, flags, 'R'
            v = z._mpf_
        elif hasattr(z, "_mpc_"):
            key = p, q, flags, 'C'
            v = z._mpc_
        # Compile (or fetch) the specialized summator for this signature
        if key not in ctx.hyp_summators:
            ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1]
        summator = ctx.hyp_summators[key]
        prec = ctx.prec
        maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec))
        extraprec = 50
        epsshift = 25
        # Jumps in magnitude occur when parameters are close to negative
        # integers. We must ensure that these terms are included in
        # the sum and added accurately
        magnitude_check = {}
        max_total_jump = 0
        for i, c in enumerate(coeffs):
            if flags[i] == 'Z':
                # A nonpositive integer lower parameter is a pole unless a
                # "larger" (closer to zero) upper parameter truncates first
                if i >= p and c <= 0:
                    ok = False
                    for ii, cc in enumerate(coeffs[:p]):
                        # Note: c <= cc or c < cc, depending on convention
                        if flags[ii] == 'Z' and cc <= 0 and c <= cc:
                            ok = True
                    if not ok:
                        raise ZeroDivisionError("pole in hypergeometric series")
                continue
            # Distance of c from the nearest integer, as a magnitude d
            n, d = ctx.nint_distance(c)
            n = -int(n)
            d = -d
            if i >= p and n >= 0 and d > 4:
                if n in magnitude_check:
                    magnitude_check[n] += d
                else:
                    magnitude_check[n] = d
                extraprec = max(extraprec, d - prec + 60)
            max_total_jump += abs(d)
        while 1:
            if extraprec > maxprec:
                raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec))
            wp = prec + extraprec
            if magnitude_check:
                mag_dict = dict((n,None) for n in magnitude_check)
            else:
                mag_dict = {}
            zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \
                epsshift, mag_dict, **kwargs)
            # Bits lost to cancellation in this attempt
            cancel = -magnitude
            jumps_resolved = True
            if extraprec < max_total_jump:
                # mag_dict entries left None mean the jump term was missed
                for n in mag_dict.values():
                    if (n is None) or (n < prec):
                        jumps_resolved = False
                        break
            accurate = (cancel < extraprec-25-5 or not accurate_small)
            if jumps_resolved:
                if accurate:
                    break
                # zero?
                zeroprec = kwargs.get('zeroprec')
                if zeroprec is not None:
                    if cancel > zeroprec:
                        if have_complex:
                            return ctx.mpc(0)
                        else:
                            return ctx.zero

            # Some near-singularities were not included, so increase
            # precision and repeat until they are
            extraprec *= 2
            # Possible workaround for bad roundoff in fixed-point arithmetic
            epsshift += 5
            extraprec += 5

        # Raw tuple result -> wrap; anything else is returned as-is
        if type(zv) is tuple:
            if have_complex:
                return ctx.make_mpc(zv)
            else:
                return ctx.make_mpf(zv)
        else:
            return zv
751
    def ldexp(ctx, x, n):
        r"""
        Computes `x 2^n` efficiently. No rounding is performed.
        The argument `x` must be a real floating-point number (or
        possible to convert into one) and `n` must be a Python ``int``.

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = False
            >>> ldexp(1, 10)
            mpf('1024.0')
            >>> ldexp(1, -3)
            mpf('0.125')

        """
        x = ctx.convert(x)
        # mpf_shift only adjusts the exponent, hence exact and fast
        return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n))
768
    def frexp(ctx, x):
        r"""
        Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`,
        `n` a Python integer, and such that `x = y 2^n`. No rounding is
        performed.

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = False
            >>> frexp(7.5)
            (mpf('0.9375'), 3)

        """
        x = ctx.convert(x)
        # Exact decomposition into mantissa and exponent
        y, n = libmp.mpf_frexp(x._mpf_)
        return ctx.make_mpf(y), n
784
+ def fneg(ctx, x, **kwargs):
785
+ """
786
+ Negates the number *x*, giving a floating-point result, optionally
787
+ using a custom precision and rounding mode.
788
+
789
+ See the documentation of :func:`~mpmath.fadd` for a detailed description
790
+ of how to specify precision and rounding.
791
+
792
+ **Examples**
793
+
794
+ An mpmath number is returned::
795
+
796
+ >>> from mpmath import *
797
+ >>> mp.dps = 15; mp.pretty = False
798
+ >>> fneg(2.5)
799
+ mpf('-2.5')
800
+ >>> fneg(-5+2j)
801
+ mpc(real='5.0', imag='-2.0')
802
+
803
+ Precise control over rounding is possible::
804
+
805
+ >>> x = fadd(2, 1e-100, exact=True)
806
+ >>> fneg(x)
807
+ mpf('-2.0')
808
+ >>> fneg(x, rounding='f')
809
+ mpf('-2.0000000000000004')
810
+
811
+ Negating with and without roundoff::
812
+
813
+ >>> n = 200000000000000000000001
814
+ >>> print(int(-mpf(n)))
815
+ -200000000000000016777216
816
+ >>> print(int(fneg(n)))
817
+ -200000000000000016777216
818
+ >>> print(int(fneg(n, prec=log(n,2)+1)))
819
+ -200000000000000000000001
820
+ >>> print(int(fneg(n, dps=log(n,10)+1)))
821
+ -200000000000000000000001
822
+ >>> print(int(fneg(n, prec=inf)))
823
+ -200000000000000000000001
824
+ >>> print(int(fneg(n, dps=inf)))
825
+ -200000000000000000000001
826
+ >>> print(int(fneg(n, exact=True)))
827
+ -200000000000000000000001
828
+
829
+ """
830
+ prec, rounding = ctx._parse_prec(kwargs)
831
+ x = ctx.convert(x)
832
+ if hasattr(x, '_mpf_'):
833
+ return ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding))
834
+ if hasattr(x, '_mpc_'):
835
+ return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding))
836
+ raise ValueError("Arguments need to be mpf or mpc compatible numbers")
837
+
838
+ def fadd(ctx, x, y, **kwargs):
839
+ """
840
+ Adds the numbers *x* and *y*, giving a floating-point result,
841
+ optionally using a custom precision and rounding mode.
842
+
843
+ The default precision is the working precision of the context.
844
+ You can specify a custom precision in bits by passing the *prec* keyword
845
+ argument, or by providing an equivalent decimal precision with the *dps*
846
+ keyword argument. If the precision is set to ``+inf``, or if the flag
847
+ *exact=True* is passed, an exact addition with no rounding is performed.
848
+
849
+ When the precision is finite, the optional *rounding* keyword argument
850
+ specifies the direction of rounding. Valid options are ``'n'`` for
851
+ nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'``
852
+ for down, ``'u'`` for up.
853
+
854
+ **Examples**
855
+
856
+ Using :func:`~mpmath.fadd` with precision and rounding control::
857
+
858
+ >>> from mpmath import *
859
+ >>> mp.dps = 15; mp.pretty = False
860
+ >>> fadd(2, 1e-20)
861
+ mpf('2.0')
862
+ >>> fadd(2, 1e-20, rounding='u')
863
+ mpf('2.0000000000000004')
864
+ >>> nprint(fadd(2, 1e-20, prec=100), 25)
865
+ 2.00000000000000000001
866
+ >>> nprint(fadd(2, 1e-20, dps=15), 25)
867
+ 2.0
868
+ >>> nprint(fadd(2, 1e-20, dps=25), 25)
869
+ 2.00000000000000000001
870
+ >>> nprint(fadd(2, 1e-20, exact=True), 25)
871
+ 2.00000000000000000001
872
+
873
+ Exact addition avoids cancellation errors, enforcing familiar laws
874
+ of numbers such as `x+y-x = y`, which don't hold in floating-point
875
+ arithmetic with finite precision::
876
+
877
+ >>> x, y = mpf(2), mpf('1e-1000')
878
+ >>> print(x + y - x)
879
+ 0.0
880
+ >>> print(fadd(x, y, prec=inf) - x)
881
+ 1.0e-1000
882
+ >>> print(fadd(x, y, exact=True) - x)
883
+ 1.0e-1000
884
+
885
+ Exact addition can be inefficient and may be impossible to perform
886
+ with large magnitude differences::
887
+
888
+ >>> fadd(1, '1e-100000000000000000000', prec=inf)
889
+ Traceback (most recent call last):
890
+ ...
891
+ OverflowError: the exact result does not fit in memory
892
+
893
+ """
894
+ prec, rounding = ctx._parse_prec(kwargs)
895
+ x = ctx.convert(x)
896
+ y = ctx.convert(y)
897
+ try:
898
+ if hasattr(x, '_mpf_'):
899
+ if hasattr(y, '_mpf_'):
900
+ return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding))
901
+ if hasattr(y, '_mpc_'):
902
+ return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding))
903
+ if hasattr(x, '_mpc_'):
904
+ if hasattr(y, '_mpf_'):
905
+ return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding))
906
+ if hasattr(y, '_mpc_'):
907
+ return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding))
908
+ except (ValueError, OverflowError):
909
+ raise OverflowError(ctx._exact_overflow_msg)
910
+ raise ValueError("Arguments need to be mpf or mpc compatible numbers")
911
+
912
+ def fsub(ctx, x, y, **kwargs):
913
+ """
914
+ Subtracts the numbers *x* and *y*, giving a floating-point result,
915
+ optionally using a custom precision and rounding mode.
916
+
917
+ See the documentation of :func:`~mpmath.fadd` for a detailed description
918
+ of how to specify precision and rounding.
919
+
920
+ **Examples**
921
+
922
+ Using :func:`~mpmath.fsub` with precision and rounding control::
923
+
924
+ >>> from mpmath import *
925
+ >>> mp.dps = 15; mp.pretty = False
926
+ >>> fsub(2, 1e-20)
927
+ mpf('2.0')
928
+ >>> fsub(2, 1e-20, rounding='d')
929
+ mpf('1.9999999999999998')
930
+ >>> nprint(fsub(2, 1e-20, prec=100), 25)
931
+ 1.99999999999999999999
932
+ >>> nprint(fsub(2, 1e-20, dps=15), 25)
933
+ 2.0
934
+ >>> nprint(fsub(2, 1e-20, dps=25), 25)
935
+ 1.99999999999999999999
936
+ >>> nprint(fsub(2, 1e-20, exact=True), 25)
937
+ 1.99999999999999999999
938
+
939
+ Exact subtraction avoids cancellation errors, enforcing familiar laws
940
+ of numbers such as `x-y+y = x`, which don't hold in floating-point
941
+ arithmetic with finite precision::
942
+
943
+ >>> x, y = mpf(2), mpf('1e1000')
944
+ >>> print(x - y + y)
945
+ 0.0
946
+ >>> print(fsub(x, y, prec=inf) + y)
947
+ 2.0
948
+ >>> print(fsub(x, y, exact=True) + y)
949
+ 2.0
950
+
951
+ Exact addition can be inefficient and may be impossible to perform
952
+ with large magnitude differences::
953
+
954
+ >>> fsub(1, '1e-100000000000000000000', prec=inf)
955
+ Traceback (most recent call last):
956
+ ...
957
+ OverflowError: the exact result does not fit in memory
958
+
959
+ """
960
+ prec, rounding = ctx._parse_prec(kwargs)
961
+ x = ctx.convert(x)
962
+ y = ctx.convert(y)
963
+ try:
964
+ if hasattr(x, '_mpf_'):
965
+ if hasattr(y, '_mpf_'):
966
+ return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding))
967
+ if hasattr(y, '_mpc_'):
968
+ return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding))
969
+ if hasattr(x, '_mpc_'):
970
+ if hasattr(y, '_mpf_'):
971
+ return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding))
972
+ if hasattr(y, '_mpc_'):
973
+ return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding))
974
+ except (ValueError, OverflowError):
975
+ raise OverflowError(ctx._exact_overflow_msg)
976
+ raise ValueError("Arguments need to be mpf or mpc compatible numbers")
977
+
978
    def fmul(ctx, x, y, **kwargs):
        """
        Multiplies the numbers *x* and *y*, giving a floating-point result,
        optionally using a custom precision and rounding mode.

        See the documentation of :func:`~mpmath.fadd` for a detailed description
        of how to specify precision and rounding.

        **Examples**

        The result is an mpmath number::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = False
            >>> fmul(2, 5.0)
            mpf('10.0')
            >>> fmul(0.5j, 0.5)
            mpc(real='0.0', imag='0.25')

        Avoiding roundoff::

            >>> x, y = 10**10+1, 10**15+1
            >>> print(x*y)
            10000000001000010000000001
            >>> print(mpf(x) * mpf(y))
            1.0000000001e+25
            >>> print(int(mpf(x) * mpf(y)))
            10000000001000011026399232
            >>> print(int(fmul(x, y)))
            10000000001000011026399232
            >>> print(int(fmul(x, y, dps=25)))
            10000000001000010000000001
            >>> print(int(fmul(x, y, exact=True)))
            10000000001000010000000001

        Exact multiplication with complex numbers can be inefficient and may
        be impossible to perform with large magnitude differences between
        real and imaginary parts::

            >>> x = 1+2j
            >>> y = mpc(2, '1e-100000000000000000000')
            >>> fmul(x, y)
            mpc(real='2.0', imag='4.0')
            >>> fmul(x, y, rounding='u')
            mpc(real='2.0', imag='4.0000000000000009')
            >>> fmul(x, y, exact=True)
            Traceback (most recent call last):
              ...
            OverflowError: the exact result does not fit in memory

        """
        # _parse_prec interprets prec/dps/rounding/exact kwargs; prec == 0
        # requests an exact (unrounded) result.
        prec, rounding = ctx._parse_prec(kwargs)
        x = ctx.convert(x)
        y = ctx.convert(y)
        try:
            # Dispatch on operand types.  The real*complex case reuses
            # mpc_mul_mpf with the operands swapped (multiplication commutes).
            if hasattr(x, '_mpf_'):
                if hasattr(y, '_mpf_'):
                    return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding))
                if hasattr(y, '_mpc_'):
                    return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding))
            if hasattr(x, '_mpc_'):
                if hasattr(y, '_mpf_'):
                    return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding))
                if hasattr(y, '_mpc_'):
                    return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding))
        except (ValueError, OverflowError):
            # In exact mode the intermediate integers can become too large;
            # report that uniformly as an overflow.
            raise OverflowError(ctx._exact_overflow_msg)
        raise ValueError("Arguments need to be mpf or mpc compatible numbers")
1046
+
1047
    def fdiv(ctx, x, y, **kwargs):
        """
        Divides the numbers *x* and *y*, giving a floating-point result,
        optionally using a custom precision and rounding mode.

        See the documentation of :func:`~mpmath.fadd` for a detailed description
        of how to specify precision and rounding.

        **Examples**

        The result is an mpmath number::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = False
            >>> fdiv(3, 2)
            mpf('1.5')
            >>> fdiv(2, 3)
            mpf('0.66666666666666663')
            >>> fdiv(2+4j, 0.5)
            mpc(real='4.0', imag='8.0')

        The rounding direction and precision can be controlled::

            >>> fdiv(2, 3, dps=3) # Should be accurate to at least 3 digits
            mpf('0.6666259765625')
            >>> fdiv(2, 3, rounding='d')
            mpf('0.66666666666666663')
            >>> fdiv(2, 3, prec=60)
            mpf('0.66666666666666667')
            >>> fdiv(2, 3, rounding='u')
            mpf('0.66666666666666674')

        Checking the error of a division by performing it at higher precision::

            >>> fdiv(2, 3) - fdiv(2, 3, prec=100)
            mpf('-3.7007434154172148e-17')

        Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not
        allowed since the quotient of two floating-point numbers generally
        does not have an exact floating-point representation. (In the
        future this might be changed to allow the case where the division
        is actually exact.)

            >>> fdiv(2, 3, exact=True)
            Traceback (most recent call last):
              ...
            ValueError: division is not an exact operation

        """
        prec, rounding = ctx._parse_prec(kwargs)
        # prec == 0 is the "exact" request; quotients are generally not
        # exactly representable, so reject it up front.
        if not prec:
            raise ValueError("division is not an exact operation")
        x = ctx.convert(x)
        y = ctx.convert(y)
        if hasattr(x, '_mpf_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                # real / complex: promote x to a complex value (imag = 0).
                return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding))
        if hasattr(x, '_mpc_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding))
        raise ValueError("Arguments need to be mpf or mpc compatible numbers")
1112
+
1113
    def nint_distance(ctx, x):
        r"""
        Return `(n,d)` where `n` is the nearest integer to `x` and `d` is
        an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision
        (measured in bits) lost to cancellation when computing `x-n`.

        >>> from mpmath import *
        >>> n, d = nint_distance(5)
        >>> print(n); print(d)
        5
        -inf
        >>> n, d = nint_distance(mpf(5))
        >>> print(n); print(d)
        5
        -inf
        >>> n, d = nint_distance(mpf(5.00000001))
        >>> print(n); print(d)
        5
        -26
        >>> n, d = nint_distance(mpf(4.99999999))
        >>> print(n); print(d)
        5
        -26
        >>> n, d = nint_distance(mpc(5,10))
        >>> print(n); print(d)
        5
        4
        >>> n, d = nint_distance(mpc(5,0.000001))
        >>> print(n); print(d)
        5
        -19

        """
        typx = type(x)
        if typx in int_types:
            # Exact integer: distance is zero, i.e. log2(0) = -inf.
            return int(x), ctx.ninf
        elif typx is rational.mpq:
            p, q = x._mpq_
            n, r = divmod(p, q)
            # Round the quotient to the nearest integer (ties go up).
            if 2*r >= q:
                n += 1
            elif not r:
                return n, ctx.ninf
            # log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q)
            d = bitcount(abs(p-n*q)) - bitcount(q)
            return n, d
        if hasattr(x, "_mpf_"):
            re = x._mpf_
            im_dist = ctx.ninf
        elif hasattr(x, "_mpc_"):
            re, im = x._mpc_
            isign, iman, iexp, ibc = im
            if iman:
                # exp + bc bounds log2|im| for a normalized mantissa.
                im_dist = iexp + ibc
            elif im == fzero:
                im_dist = ctx.ninf
            else:
                # Zero mantissa with nonzero exponent encodes inf/nan.
                raise ValueError("requires a finite number")
        else:
            x = ctx.convert(x)
            if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
                # Single retry after conversion to a context type.
                return ctx.nint_distance(x)
            else:
                raise TypeError("requires an mpf/mpc")
        sign, man, exp, bc = re
        mag = exp+bc
        # |x| < 0.5
        if mag < 0:
            n = 0
            re_dist = mag
        elif man:
            # exact integer
            if exp >= 0:
                n = man << exp
                re_dist = ctx.ninf
            # exact half-integer
            elif exp == -1:
                n = (man>>1)+1
                re_dist = 0
            else:
                # General case: d fractional bits below the half-integer bit.
                d = (-exp-1)
                t = man >> d
                # t is 2x truncated toward zero; an odd t means the
                # fractional part is >= 1/2, so round the magnitude up and
                # compute |x - n| as the leftover mantissa either way.
                if t & 1:
                    t += 1
                    man = (t<<d) - man
                else:
                    man -= (t<<d)
                n = t>>1 # int(t)>>1
                re_dist = exp+bitcount(man)
            if sign:
                n = -n
        elif re == fzero:
            re_dist = ctx.ninf
            n = 0
        else:
            raise ValueError("requires a finite number")
        # For complex input, the distance is dominated by the larger of the
        # real-part distance and the imaginary magnitude.
        return n, max(re_dist, im_dist)
1210
+
1211
+ def fprod(ctx, factors):
1212
+ r"""
1213
+ Calculates a product containing a finite number of factors (for
1214
+ infinite products, see :func:`~mpmath.nprod`). The factors will be
1215
+ converted to mpmath numbers.
1216
+
1217
+ >>> from mpmath import *
1218
+ >>> mp.dps = 15; mp.pretty = False
1219
+ >>> fprod([1, 2, 0.5, 7])
1220
+ mpf('7.0')
1221
+
1222
+ """
1223
+ orig = ctx.prec
1224
+ try:
1225
+ v = ctx.one
1226
+ for p in factors:
1227
+ v *= p
1228
+ finally:
1229
+ ctx.prec = orig
1230
+ return +v
1231
+
1232
    def rand(ctx):
        """
        Returns an ``mpf`` with value chosen randomly from `[0, 1)`.
        The number of randomly generated bits in the mantissa is equal
        to the working precision.
        """
        # ctx._prec is the current binary working precision; mpf_rand
        # produces that many mantissa bits.
        return ctx.make_mpf(mpf_rand(ctx._prec))
1239
+
1240
    def fraction(ctx, p, q):
        """
        Given Python integers `(p, q)`, returns a lazy ``mpf`` representing
        the fraction `p/q`. The value is updated with the precision.

            >>> from mpmath import *
            >>> mp.dps = 15
            >>> a = fraction(1,100)
            >>> b = mpf(1)/100
            >>> print(a); print(b)
            0.01
            0.01
            >>> mp.dps = 30
            >>> print(a); print(b) # a will be accurate
            0.01
            0.0100000000000000002081668171172
            >>> mp.dps = 15
        """
        # A `constant` re-evaluates p/q whenever it is used, at whatever
        # precision is active at that moment, so the value never carries
        # stale rounding error from an earlier, lower precision.
        return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd),
            '%s/%s' % (p, q))
1260
+
1261
+ def absmin(ctx, x):
1262
+ return abs(ctx.convert(x))
1263
+
1264
+ def absmax(ctx, x):
1265
+ return abs(ctx.convert(x))
1266
+
1267
+ def _as_points(ctx, x):
1268
+ # XXX: remove this?
1269
+ if hasattr(x, '_mpi_'):
1270
+ a, b = x._mpi_
1271
+ return [ctx.make_mpf(a), ctx.make_mpf(b)]
1272
+ return x
1273
+
1274
+ '''
1275
+ def _zetasum(ctx, s, a, b):
1276
+ """
1277
+ Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small
1278
+ integers.
1279
+ """
1280
+ a = int(a)
1281
+ b = int(b)
1282
+ s = ctx.convert(s)
1283
+ prec, rounding = ctx._prec_rounding
1284
+ if hasattr(s, '_mpf_'):
1285
+ v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec))
1286
+ elif hasattr(s, '_mpc_'):
1287
+ v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec))
1288
+ return v
1289
+ '''
1290
+
1291
    def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False):
        # Internal helper: thin wrapper around libmp.mpc_zetasum. Only
        # integer `a` and complex `s` are supported here; other inputs
        # must be handled by the caller.
        # NOTE(review): `derivatives=[0]` is a mutable default argument; it
        # appears to be passed straight through to libmp without mutation,
        # but confirm before relying on that.
        if not (ctx.isint(a) and hasattr(s, "_mpc_")):
            raise NotImplementedError
        a = int(a)
        prec = ctx._prec
        xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec)
        # Wrap the raw libmp pairs back into context mpc instances.
        xs = [ctx.make_mpc(x) for x in xs]
        ys = [ctx.make_mpc(y) for y in ys]
        return xs, ys
1300
+
1301
class PrecisionManager:
    """Decorator / context manager that temporarily changes a context's
    working precision.

    Exactly one of *precfun* / *dpsfun* is expected to be set: it maps the
    current ``prec`` (respectively ``dps``) to the value to use inside the
    managed region.  On exit, the original binary precision is restored.
    When used as a decorator with ``normalize_output=True``, return values
    are re-rounded (via unary ``+``) before the precision is restored.
    """
    def __init__(self, ctx, precfun, dpsfun, normalize_output=False):
        self.ctx = ctx
        self.precfun = precfun
        self.dpsfun = dpsfun
        self.normalize_output = normalize_output
    def __call__(self, f):
        # Decorator form: wrap f so it runs at the adjusted precision.
        @functools.wraps(f)
        def g(*args, **kwargs):
            orig = self.ctx.prec
            try:
                # precfun takes priority; otherwise adjust via dps.
                if self.precfun:
                    self.ctx.prec = self.precfun(self.ctx.prec)
                else:
                    self.ctx.dps = self.dpsfun(self.ctx.dps)
                if self.normalize_output:
                    v = f(*args, **kwargs)
                    # Re-round each element of a tuple result, or the
                    # single result, while the raised precision is active.
                    if type(v) is tuple:
                        return tuple([+a for a in v])
                    return +v
                else:
                    return f(*args, **kwargs)
            finally:
                # Always restore the caller's precision.
                self.ctx.prec = orig
        return g
    def __enter__(self):
        # Context-manager form; note that no value is yielded (`as` binds None).
        self.origp = self.ctx.prec
        if self.precfun:
            self.ctx.prec = self.precfun(self.ctx.prec)
        else:
            self.ctx.dps = self.dpsfun(self.ctx.dps)
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.ctx.prec = self.origp
        # Returning False propagates any exception raised inside the block.
        return False
1335
+
1336
+
1337
if __name__ == '__main__':
    # Running this module directly executes the doctests embedded in the
    # docstrings above.
    import doctest
    doctest.testmod()
lib/python3.11/site-packages/mpmath/ctx_mp_python.py ADDED
@@ -0,0 +1,1149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#from ctx_base import StandardBaseContext

import numbers

from .libmp.backend import basestring, exec_

from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
    round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
    ComplexResult, to_pickable, from_pickable, normalize,
    from_int, from_float, from_npfloat, from_Decimal, from_str, to_int, to_float, to_str,
    from_rational, from_man_exp,
    fone, fzero, finf, fninf, fnan,
    mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
    mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
    mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
    mpf_hash, mpf_rand,
    mpf_sum,
    bitcount, to_fixed,
    mpc_to_str,
    mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
    mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
    mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
    mpc_mpf_div,
    mpf_pow,
    mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
    mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
    mpf_glaisher, mpf_twinprime, mpf_mertens,
    int_types)

from . import rational
from . import function_docs
31
# Shortcut used throughout this module to allocate instances without
# running __init__/__new__ conversion logic.
new = object.__new__

class mpnumeric(object):
    """Base class for mpf and mpc."""
    __slots__ = []
    def __new__(cls, val):
        # Abstract: only the concrete subclasses may be instantiated.
        raise NotImplementedError
38
+
39
class _mpf(mpnumeric):
    """
    An mpf instance holds a real-valued floating-point number. mpf:s
    work analogously to Python floats, but support arbitrary-precision
    arithmetic.
    """
    # Only the raw libmp tuple (sign, man, exp, bc) is stored per instance.
    __slots__ = ['_mpf_']

    def __new__(cls, val=fzero, **kwargs):
        """A new mpf can be created from a Python float, an int,
        or a decimal string representing a number in floating-point
        format.  ``prec``/``dps``/``rounding`` keyword arguments override
        the context's working precision for the conversion."""
        prec, rounding = cls.context._prec_rounding
        if kwargs:
            prec = kwargs.get('prec', prec)
            if 'dps' in kwargs:
                prec = dps_to_prec(kwargs['dps'])
            rounding = kwargs.get('rounding', rounding)
        if type(val) is cls:
            sign, man, exp, bc = val._mpf_
            # A zero mantissa with nonzero exponent encodes an inf/nan
            # special value; reuse it directly (no re-normalization needed).
            if (not man) and exp:
                return val
            v = new(cls)
            v._mpf_ = normalize(sign, man, exp, bc, prec, rounding)
            return v
        elif type(val) is tuple:
            if len(val) == 2:
                # (man, exp) pair.
                v = new(cls)
                v._mpf_ = from_man_exp(val[0], val[1], prec, rounding)
                return v
            if len(val) == 4:
                # Raw (sign, man, exp, bc) tuple; specials pass through as-is.
                if val not in (finf, fninf, fnan):
                    sign, man, exp, bc = val
                    val = normalize(sign, MPZ(man), exp, bc, prec, rounding)
                v = new(cls)
                v._mpf_ = val
                return v
            raise ValueError
        else:
            v = new(cls)
            v._mpf_ = mpf_pos(cls.mpf_convert_arg(val, prec, rounding), prec, rounding)
            return v

    @classmethod
    def mpf_convert_arg(cls, x, prec, rounding):
        # Convert a constructor argument to a raw libmp mpf value.
        if isinstance(x, int_types): return from_int(x)
        if isinstance(x, float): return from_float(x)
        if isinstance(x, basestring): return from_str(x, prec, rounding)
        if isinstance(x, cls.context.constant): return x.func(prec, rounding)
        if hasattr(x, '_mpf_'): return x._mpf_
        if hasattr(x, '_mpmath_'):
            t = cls.context.convert(x._mpmath_(prec, rounding))
            if hasattr(t, '_mpf_'):
                return t._mpf_
        if hasattr(x, '_mpi_'):
            a, b = x._mpi_
            if a == b:
                return a
            raise ValueError("can only create mpf from zero-width interval")
        raise TypeError("cannot create mpf from " + repr(x))

    @classmethod
    def mpf_convert_rhs(cls, x):
        # Convert the right-hand operand of a binary operation.  Returns
        # NotImplemented (instead of raising) so Python can try the
        # reflected operation on the other operand.
        if isinstance(x, int_types): return from_int(x)
        if isinstance(x, float): return from_float(x)
        if isinstance(x, complex_types): return cls.context.mpc(x)
        if isinstance(x, rational.mpq):
            p, q = x._mpq_
            return from_rational(p, q, cls.context.prec)
        if hasattr(x, '_mpf_'): return x._mpf_
        if hasattr(x, '_mpmath_'):
            t = cls.context.convert(x._mpmath_(*cls.context._prec_rounding))
            if hasattr(t, '_mpf_'):
                return t._mpf_
            return t
        return NotImplemented

    @classmethod
    def mpf_convert_lhs(cls, x):
        # Like mpf_convert_rhs, but always wraps a raw tuple into an mpf
        # instance, since the result is used as a full operand.
        x = cls.mpf_convert_rhs(x)
        if type(x) is tuple:
            return cls.context.make_mpf(x)
        return x

    # Read-only views of the raw (sign, man, exp, bc) representation.
    man_exp = property(lambda self: self._mpf_[1:3])
    man = property(lambda self: self._mpf_[1])
    exp = property(lambda self: self._mpf_[2])
    bc = property(lambda self: self._mpf_[3])

    # Real numbers are their own real part; imaginary part is exactly zero.
    real = property(lambda self: self)
    imag = property(lambda self: self.context.zero)

    conjugate = lambda self: self

    def __getstate__(self): return to_pickable(self._mpf_)
    def __setstate__(self, val): self._mpf_ = from_pickable(val)

    def __repr__(s):
        if s.context.pretty:
            return str(s)
        return "mpf('%s')" % to_str(s._mpf_, s.context._repr_digits)

    def __str__(s): return to_str(s._mpf_, s.context._str_digits)
    def __hash__(s): return mpf_hash(s._mpf_)
    def __int__(s): return int(to_int(s._mpf_))
    def __long__(s): return long(to_int(s._mpf_))  # Python 2 only; `long` does not exist on Python 3
    def __float__(s): return to_float(s._mpf_, rnd=s.context._prec_rounding[1])
    def __complex__(s): return complex(float(s))
    def __nonzero__(s): return s._mpf_ != fzero

    __bool__ = __nonzero__

    def __abs__(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpf_ = mpf_abs(s._mpf_, prec, rounding)
        return v

    def __pos__(s):
        # Unary plus re-rounds the value at the current working precision.
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpf_ = mpf_pos(s._mpf_, prec, rounding)
        return v

    def __neg__(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpf_ = mpf_neg(s._mpf_, prec, rounding)
        return v

    def _cmp(s, t, func):
        # Shared implementation for all ordering comparisons.
        if hasattr(t, '_mpf_'):
            t = t._mpf_
        else:
            t = s.mpf_convert_rhs(t)
            if t is NotImplemented:
                return t
        return func(s._mpf_, t)

    def __cmp__(s, t): return s._cmp(t, mpf_cmp)
    def __lt__(s, t): return s._cmp(t, mpf_lt)
    def __gt__(s, t): return s._cmp(t, mpf_gt)
    def __le__(s, t): return s._cmp(t, mpf_le)
    def __ge__(s, t): return s._cmp(t, mpf_ge)

    def __ne__(s, t):
        # __eq__ is generated and attached below (see binary_op).
        v = s.__eq__(t)
        if v is NotImplemented:
            return v
        return not v

    def __rsub__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if type(t) in int_types:
            v = new(cls)
            v._mpf_ = mpf_sub(from_int(t), s._mpf_, prec, rounding)
            return v
        t = s.mpf_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t - s

    def __rdiv__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if isinstance(t, int_types):
            # Fast path: integer / mpf has a dedicated libmp routine.
            v = new(cls)
            v._mpf_ = mpf_rdiv_int(t, s._mpf_, prec, rounding)
            return v
        t = s.mpf_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t / s

    def __rpow__(s, t):
        t = s.mpf_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t ** s

    def __rmod__(s, t):
        t = s.mpf_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t % s

    def sqrt(s):
        return s.context.sqrt(s)

    def ae(s, t, rel_eps=None, abs_eps=None):
        # "almost equal" shorthand for context.almosteq.
        return s.context.almosteq(s, t, rel_eps, abs_eps)

    def to_fixed(self, prec):
        return to_fixed(self._mpf_, prec)

    def __round__(self, *args):
        # Delegates to float rounding; loses precision beyond 53 bits.
        return round(float(self), *args)
235
+
236
# Code template for binary mpf operations, exec:ed by binary_op() below.
# %WITH_MPF% / %WITH_INT% / %WITH_MPC% are replaced by operation-specific
# snippets; the fall-through at the end retries after a generic conversion
# of exotic operand types (returning NotImplemented when that fails so
# Python can try the reflected operation).
mpf_binary_op = """
def %NAME%(self, other):
    mpf, new, (prec, rounding) = self._ctxdata
    sval = self._mpf_
    if hasattr(other, '_mpf_'):
        tval = other._mpf_
        %WITH_MPF%
    ttype = type(other)
    if ttype in int_types:
        %WITH_INT%
    elif ttype is float:
        tval = from_float(other)
        %WITH_MPF%
    elif hasattr(other, '_mpc_'):
        tval = other._mpc_
        mpc = type(other)
        %WITH_MPC%
    elif ttype is complex:
        tval = from_float(other.real), from_float(other.imag)
        mpc = self.context.mpc
        %WITH_MPC%
    if isinstance(other, mpnumeric):
        return NotImplemented
    try:
        other = mpf.context.convert(other, strings=False)
    except TypeError:
        return NotImplemented
    return self.%NAME%(other)
"""

# Epilogues that wrap a raw result value in a fresh mpf/mpc instance.
return_mpf = "; obj = new(mpf); obj._mpf_ = val; return obj"
return_mpc = "; obj = new(mpc); obj._mpc_ = val; return obj"

# mpf ** mpf may have a complex result (negative base, fractional
# exponent); unless the context traps complex results, fall back to
# complex exponentiation.
mpf_pow_same = """
        try:
            val = mpf_pow(sval, tval, prec, rounding) %s
        except ComplexResult:
            if mpf.context.trap_complex:
                raise
            mpc = mpf.context.mpc
            val = mpc_pow((sval, fzero), (tval, fzero), prec, rounding) %s
""" % (return_mpf, return_mpc)

def binary_op(name, with_mpf='', with_int='', with_mpc=''):
    # Generate one binary-operator method by filling in the template
    # above and exec:ing it; returns the compiled function object.
    code = mpf_binary_op
    code = code.replace("%WITH_INT%", with_int)
    code = code.replace("%WITH_MPC%", with_mpc)
    code = code.replace("%WITH_MPF%", with_mpf)
    code = code.replace("%NAME%", name)
    np = {}
    exec_(code, globals(), np)
    return np[name]

# Attach the generated arithmetic/comparison methods to _mpf.  Each call
# supplies the mpf-mpf, mpf-int and mpf-mpc variants of the operation.
_mpf.__eq__ = binary_op('__eq__',
    'return mpf_eq(sval, tval)',
    'return mpf_eq(sval, from_int(other))',
    'return (tval[1] == fzero) and mpf_eq(tval[0], sval)')

_mpf.__add__ = binary_op('__add__',
    'val = mpf_add(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_add(sval, from_int(other), prec, rounding)' + return_mpf,
    'val = mpc_add_mpf(tval, sval, prec, rounding)' + return_mpc)

_mpf.__sub__ = binary_op('__sub__',
    'val = mpf_sub(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_sub(sval, from_int(other), prec, rounding)' + return_mpf,
    'val = mpc_sub((sval, fzero), tval, prec, rounding)' + return_mpc)

_mpf.__mul__ = binary_op('__mul__',
    'val = mpf_mul(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_mul_int(sval, other, prec, rounding)' + return_mpf,
    'val = mpc_mul_mpf(tval, sval, prec, rounding)' + return_mpc)

_mpf.__div__ = binary_op('__div__',
    'val = mpf_div(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_div(sval, from_int(other), prec, rounding)' + return_mpf,
    'val = mpc_mpf_div(sval, tval, prec, rounding)' + return_mpc)

_mpf.__mod__ = binary_op('__mod__',
    'val = mpf_mod(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_mod(sval, from_int(other), prec, rounding)' + return_mpf,
    'raise NotImplementedError("complex modulo")')

_mpf.__pow__ = binary_op('__pow__',
    mpf_pow_same,
    'val = mpf_pow_int(sval, other, prec, rounding)' + return_mpf,
    'val = mpc_pow((sval, fzero), tval, prec, rounding)' + return_mpc)

# Commutative operations reuse the forward implementation; division is
# mapped onto the Python 3 truediv protocol.
_mpf.__radd__ = _mpf.__add__
_mpf.__rmul__ = _mpf.__mul__
_mpf.__truediv__ = _mpf.__div__
_mpf.__rtruediv__ = _mpf.__rdiv__
328
+
329
+
330
class _constant(_mpf):
    """Represents a mathematical constant with dynamic precision.
    When printed or used in an arithmetic operation, a constant
    is converted to a regular mpf at the working precision. A
    regular mpf can also be obtained using the operation +x."""

    def __new__(cls, func, name, docname=''):
        # func(prec, rounding) -> raw mpf value; evaluated lazily on use.
        a = object.__new__(cls)
        a.name = name
        a.func = func
        a.__doc__ = getattr(function_docs, docname, '')
        return a

    def __call__(self, prec=None, dps=None, rounding=None):
        # Evaluate at an explicit precision; defaults to the context's
        # current settings.  dps, when given, overrides prec.
        prec2, rounding2 = self.context._prec_rounding
        if not prec: prec = prec2
        if not rounding: rounding = rounding2
        if dps: prec = dps_to_prec(dps)
        return self.context.make_mpf(self.func(prec, rounding))

    @property
    def _mpf_(self):
        # Recomputed on every access so the value always reflects the
        # context's current working precision.
        prec, rounding = self.context._prec_rounding
        return self.func(prec, rounding)

    def __repr__(self):
        return "<%s: %s~>" % (self.name, self.context.nstr(self(dps=15)))
357
+
358
+
359
class _mpc(mpnumeric):
    """
    An mpc represents a complex number using a pair of mpf:s (one
    for the real part and another for the imaginary part.) The mpc
    class behaves fairly similarly to Python's complex type.
    """

    # Only the raw pair of libmp mpf values is stored per instance.
    __slots__ = ['_mpc_']

    def __new__(cls, real=0, imag=0):
        s = object.__new__(cls)
        if isinstance(real, complex_types):
            real, imag = real.real, real.imag
        elif hasattr(real, '_mpc_'):
            # Another mpc-like object: share its raw value directly.
            s._mpc_ = real._mpc_
            return s
        real = cls.context.mpf(real)
        imag = cls.context.mpf(imag)
        s._mpc_ = (real._mpf_, imag._mpf_)
        return s

    real = property(lambda self: self.context.make_mpf(self._mpc_[0]))
    imag = property(lambda self: self.context.make_mpf(self._mpc_[1]))

    def __getstate__(self):
        return to_pickable(self._mpc_[0]), to_pickable(self._mpc_[1])

    def __setstate__(self, val):
        self._mpc_ = from_pickable(val[0]), from_pickable(val[1])

    def __repr__(s):
        if s.context.pretty:
            return str(s)
        # Strip the "mpf('...')" wrapper to embed just the digit strings.
        r = repr(s.real)[4:-1]
        i = repr(s.imag)[4:-1]
        return "%s(real=%s, imag=%s)" % (type(s).__name__, r, i)

    def __str__(s):
        return "(%s)" % mpc_to_str(s._mpc_, s.context._str_digits)

    def __complex__(s):
        return mpc_to_complex(s._mpc_, rnd=s.context._prec_rounding[1])

    def __pos__(s):
        # Unary plus re-rounds at the current working precision.
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpc_ = mpc_pos(s._mpc_, prec, rounding)
        return v

    def __abs__(s):
        # |z| is real, so the result is an mpf, not an mpc.
        prec, rounding = s.context._prec_rounding
        v = new(s.context.mpf)
        v._mpf_ = mpc_abs(s._mpc_, prec, rounding)
        return v

    def __neg__(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpc_ = mpc_neg(s._mpc_, prec, rounding)
        return v

    def conjugate(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpc_ = mpc_conjugate(s._mpc_, prec, rounding)
        return v

    def __nonzero__(s):
        return mpc_is_nonzero(s._mpc_)

    __bool__ = __nonzero__

    def __hash__(s):
        return mpc_hash(s._mpc_)

    @classmethod
    def mpc_convert_lhs(cls, x):
        # Convert an operand via the context; NotImplemented (rather than
        # an exception) lets Python try the reflected operation.
        try:
            y = cls.context.convert(x)
            return y
        except TypeError:
            return NotImplemented

    def __eq__(s, t):
        if not hasattr(t, '_mpc_'):
            if isinstance(t, str):
                return False
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
        return s.real == t.real and s.imag == t.imag

    def __ne__(s, t):
        b = s.__eq__(t)
        if b is NotImplemented:
            return b
        return not b

    def _compare(*args):
        raise TypeError("no ordering relation is defined for complex numbers")

    # BUG FIX: previously __gt__ was assigned twice and __lt__ was never
    # assigned, so `mpc < mpc` fell back to Python's generic unorderable
    # TypeError instead of the intended message above.  All four ordering
    # operators now raise via _compare.
    __lt__ = _compare
    __le__ = _compare
    __gt__ = _compare
    __ge__ = _compare

    def __add__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if not hasattr(t, '_mpc_'):
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
            if hasattr(t, '_mpf_'):
                v = new(cls)
                v._mpc_ = mpc_add_mpf(s._mpc_, t._mpf_, prec, rounding)
                return v
        v = new(cls)
        v._mpc_ = mpc_add(s._mpc_, t._mpc_, prec, rounding)
        return v

    def __sub__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if not hasattr(t, '_mpc_'):
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
            if hasattr(t, '_mpf_'):
                v = new(cls)
                v._mpc_ = mpc_sub_mpf(s._mpc_, t._mpf_, prec, rounding)
                return v
        v = new(cls)
        v._mpc_ = mpc_sub(s._mpc_, t._mpc_, prec, rounding)
        return v

    def __mul__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if not hasattr(t, '_mpc_'):
            if isinstance(t, int_types):
                # Fast path with a dedicated libmp routine.
                v = new(cls)
                v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding)
                return v
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
            if hasattr(t, '_mpf_'):
                v = new(cls)
                v._mpc_ = mpc_mul_mpf(s._mpc_, t._mpf_, prec, rounding)
                return v
            # (A second, redundant mpc_convert_lhs call was removed here:
            # at this point t has already been converted and, not being an
            # mpf, must expose _mpc_.)
        v = new(cls)
        v._mpc_ = mpc_mul(s._mpc_, t._mpc_, prec, rounding)
        return v

    def __div__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if not hasattr(t, '_mpc_'):
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
            if hasattr(t, '_mpf_'):
                v = new(cls)
                v._mpc_ = mpc_div_mpf(s._mpc_, t._mpf_, prec, rounding)
                return v
        v = new(cls)
        v._mpc_ = mpc_div(s._mpc_, t._mpc_, prec, rounding)
        return v

    def __pow__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if isinstance(t, int_types):
            v = new(cls)
            v._mpc_ = mpc_pow_int(s._mpc_, t, prec, rounding)
            return v
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        v = new(cls)
        if hasattr(t, '_mpf_'):
            v._mpc_ = mpc_pow_mpf(s._mpc_, t._mpf_, prec, rounding)
        else:
            v._mpc_ = mpc_pow(s._mpc_, t._mpc_, prec, rounding)
        return v

    # Addition commutes, so the reflected form reuses __add__.
    __radd__ = __add__

    def __rsub__(s, t):
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t - s

    def __rmul__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if isinstance(t, int_types):
            v = new(cls)
            v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding)
            return v
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t * s

    def __rdiv__(s, t):
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t / s

    def __rpow__(s, t):
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t ** s

    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    def ae(s, t, rel_eps=None, abs_eps=None):
        # "almost equal" shorthand for context.almosteq.
        return s.context.almosteq(s, t, rel_eps, abs_eps)
578
+
579
+
580
# Types accepted wherever a complex value may appear (stdlib complex or mpc).
complex_types = (complex, _mpc)
581
+
582
+
583
+ class PythonMPContext(object):
584
+
585
    def __init__(ctx):
        # The mutable [prec, rounding] list is shared *by reference* with
        # the mpf/mpc/constant types created below, so a precision change
        # on the context is immediately visible to all of them.
        ctx._prec_rounding = [53, round_nearest]
        # Per-context subclasses, so multiple contexts can coexist with
        # independent settings.
        ctx.mpf = type('mpf', (_mpf,), {})
        ctx.mpc = type('mpc', (_mpc,), {})
        ctx.mpf._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
        ctx.mpc._ctxdata = [ctx.mpc, new, ctx._prec_rounding]
        ctx.mpf.context = ctx
        ctx.mpc.context = ctx
        ctx.constant = type('constant', (_constant,), {})
        # Constants materialize as plain mpf:s, hence ctx.mpf here.
        ctx.constant._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
        ctx.constant.context = ctx
596
+
597
    def make_mpf(ctx, v):
        # Wrap a raw libmp value in an mpf without any checking/rounding.
        a = new(ctx.mpf)
        a._mpf_ = v
        return a
601
+
602
    def make_mpc(ctx, v):
        # Wrap a raw (real, imag) libmp pair in an mpc without checking.
        a = new(ctx.mpc)
        a._mpc_ = v
        return a
606
+
607
    def default(ctx):
        # Restore the IEEE-double-like defaults: 53 bits / 15 digits.
        ctx._prec = ctx._prec_rounding[0] = 53
        ctx._dps = 15
        ctx.trap_complex = False
611
+
612
    def _set_prec(ctx, n):
        # Keeps the shared _prec_rounding list in sync; note that _dps is
        # derived from the raw n, not the clamped value.
        ctx._prec = ctx._prec_rounding[0] = max(1, int(n))
        ctx._dps = prec_to_dps(n)
615
+
616
    def _set_dps(ctx, n):
        # Mirror of _set_prec, driven by a decimal digit count.
        ctx._prec = ctx._prec_rounding[0] = dps_to_prec(n)
        ctx._dps = max(1, int(n))
619
+
620
    # Binary precision (bits) and decimal precision (digits) are coupled:
    # setting either one updates both through the setters above.
    prec = property(lambda ctx: ctx._prec, _set_prec)
    dps = property(lambda ctx: ctx._dps, _set_dps)
622
+
623
+ def convert(ctx, x, strings=True):
624
+ """
625
+ Converts *x* to an ``mpf`` or ``mpc``. If *x* is of type ``mpf``,
626
+ ``mpc``, ``int``, ``float``, ``complex``, the conversion
627
+ will be performed losslessly.
628
+
629
+ If *x* is a string, the result will be rounded to the present
630
+ working precision. Strings representing fractions or complex
631
+ numbers are permitted.
632
+
633
+ >>> from mpmath import *
634
+ >>> mp.dps = 15; mp.pretty = False
635
+ >>> mpmathify(3.5)
636
+ mpf('3.5')
637
+ >>> mpmathify('2.1')
638
+ mpf('2.1000000000000001')
639
+ >>> mpmathify('3/4')
640
+ mpf('0.75')
641
+ >>> mpmathify('2+3j')
642
+ mpc(real='2.0', imag='3.0')
643
+
644
+ """
645
+ if type(x) in ctx.types: return x
646
+ if isinstance(x, int_types): return ctx.make_mpf(from_int(x))
647
+ if isinstance(x, float): return ctx.make_mpf(from_float(x))
648
+ if isinstance(x, complex):
649
+ return ctx.make_mpc((from_float(x.real), from_float(x.imag)))
650
+ if type(x).__module__ == 'numpy': return ctx.npconvert(x)
651
+ if isinstance(x, numbers.Rational): # e.g. Fraction
652
+ try: x = rational.mpq(int(x.numerator), int(x.denominator))
653
+ except: pass
654
+ prec, rounding = ctx._prec_rounding
655
+ if isinstance(x, rational.mpq):
656
+ p, q = x._mpq_
657
+ return ctx.make_mpf(from_rational(p, q, prec))
658
+ if strings and isinstance(x, basestring):
659
+ try:
660
+ _mpf_ = from_str(x, prec, rounding)
661
+ return ctx.make_mpf(_mpf_)
662
+ except ValueError:
663
+ pass
664
+ if hasattr(x, '_mpf_'): return ctx.make_mpf(x._mpf_)
665
+ if hasattr(x, '_mpc_'): return ctx.make_mpc(x._mpc_)
666
+ if hasattr(x, '_mpmath_'):
667
+ return ctx.convert(x._mpmath_(prec, rounding))
668
+ if type(x).__module__ == 'decimal':
669
+ try: return ctx.make_mpf(from_Decimal(x, prec, rounding))
670
+ except: pass
671
+ return ctx._convert_fallback(x, strings)
672
+
673
+ def npconvert(ctx, x):
674
+ """
675
+ Converts *x* to an ``mpf`` or ``mpc``. *x* should be a numpy
676
+ scalar.
677
+ """
678
+ import numpy as np
679
+ if isinstance(x, np.integer): return ctx.make_mpf(from_int(int(x)))
680
+ if isinstance(x, np.floating): return ctx.make_mpf(from_npfloat(x))
681
+ if isinstance(x, np.complexfloating):
682
+ return ctx.make_mpc((from_npfloat(x.real), from_npfloat(x.imag)))
683
+ raise TypeError("cannot create mpf from " + repr(x))
684
+
685
+ def isnan(ctx, x):
686
+ """
687
+ Return *True* if *x* is a NaN (not-a-number), or for a complex
688
+ number, whether either the real or complex part is NaN;
689
+ otherwise return *False*::
690
+
691
+ >>> from mpmath import *
692
+ >>> isnan(3.14)
693
+ False
694
+ >>> isnan(nan)
695
+ True
696
+ >>> isnan(mpc(3.14,2.72))
697
+ False
698
+ >>> isnan(mpc(3.14,nan))
699
+ True
700
+
701
+ """
702
+ if hasattr(x, "_mpf_"):
703
+ return x._mpf_ == fnan
704
+ if hasattr(x, "_mpc_"):
705
+ return fnan in x._mpc_
706
+ if isinstance(x, int_types) or isinstance(x, rational.mpq):
707
+ return False
708
+ x = ctx.convert(x)
709
+ if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
710
+ return ctx.isnan(x)
711
+ raise TypeError("isnan() needs a number as input")
712
+
713
+ def isinf(ctx, x):
714
+ """
715
+ Return *True* if the absolute value of *x* is infinite;
716
+ otherwise return *False*::
717
+
718
+ >>> from mpmath import *
719
+ >>> isinf(inf)
720
+ True
721
+ >>> isinf(-inf)
722
+ True
723
+ >>> isinf(3)
724
+ False
725
+ >>> isinf(3+4j)
726
+ False
727
+ >>> isinf(mpc(3,inf))
728
+ True
729
+ >>> isinf(mpc(inf,3))
730
+ True
731
+
732
+ """
733
+ if hasattr(x, "_mpf_"):
734
+ return x._mpf_ in (finf, fninf)
735
+ if hasattr(x, "_mpc_"):
736
+ re, im = x._mpc_
737
+ return re in (finf, fninf) or im in (finf, fninf)
738
+ if isinstance(x, int_types) or isinstance(x, rational.mpq):
739
+ return False
740
+ x = ctx.convert(x)
741
+ if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
742
+ return ctx.isinf(x)
743
+ raise TypeError("isinf() needs a number as input")
744
+
745
+ def isnormal(ctx, x):
746
+ """
747
+ Determine whether *x* is "normal" in the sense of floating-point
748
+ representation; that is, return *False* if *x* is zero, an
749
+ infinity or NaN; otherwise return *True*. By extension, a
750
+ complex number *x* is considered "normal" if its magnitude is
751
+ normal::
752
+
753
+ >>> from mpmath import *
754
+ >>> isnormal(3)
755
+ True
756
+ >>> isnormal(0)
757
+ False
758
+ >>> isnormal(inf); isnormal(-inf); isnormal(nan)
759
+ False
760
+ False
761
+ False
762
+ >>> isnormal(0+0j)
763
+ False
764
+ >>> isnormal(0+3j)
765
+ True
766
+ >>> isnormal(mpc(2,nan))
767
+ False
768
+ """
769
+ if hasattr(x, "_mpf_"):
770
+ return bool(x._mpf_[1])
771
+ if hasattr(x, "_mpc_"):
772
+ re, im = x._mpc_
773
+ re_normal = bool(re[1])
774
+ im_normal = bool(im[1])
775
+ if re == fzero: return im_normal
776
+ if im == fzero: return re_normal
777
+ return re_normal and im_normal
778
+ if isinstance(x, int_types) or isinstance(x, rational.mpq):
779
+ return bool(x)
780
+ x = ctx.convert(x)
781
+ if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
782
+ return ctx.isnormal(x)
783
+ raise TypeError("isnormal() needs a number as input")
784
+
785
+ def isint(ctx, x, gaussian=False):
786
+ """
787
+ Return *True* if *x* is integer-valued; otherwise return
788
+ *False*::
789
+
790
+ >>> from mpmath import *
791
+ >>> isint(3)
792
+ True
793
+ >>> isint(mpf(3))
794
+ True
795
+ >>> isint(3.2)
796
+ False
797
+ >>> isint(inf)
798
+ False
799
+
800
+ Optionally, Gaussian integers can be checked for::
801
+
802
+ >>> isint(3+0j)
803
+ True
804
+ >>> isint(3+2j)
805
+ False
806
+ >>> isint(3+2j, gaussian=True)
807
+ True
808
+
809
+ """
810
+ if isinstance(x, int_types):
811
+ return True
812
+ if hasattr(x, "_mpf_"):
813
+ sign, man, exp, bc = xval = x._mpf_
814
+ return bool((man and exp >= 0) or xval == fzero)
815
+ if hasattr(x, "_mpc_"):
816
+ re, im = x._mpc_
817
+ rsign, rman, rexp, rbc = re
818
+ isign, iman, iexp, ibc = im
819
+ re_isint = (rman and rexp >= 0) or re == fzero
820
+ if gaussian:
821
+ im_isint = (iman and iexp >= 0) or im == fzero
822
+ return re_isint and im_isint
823
+ return re_isint and im == fzero
824
+ if isinstance(x, rational.mpq):
825
+ p, q = x._mpq_
826
+ return p % q == 0
827
+ x = ctx.convert(x)
828
+ if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
829
+ return ctx.isint(x, gaussian)
830
+ raise TypeError("isint() needs a number as input")
831
+
832
+ def fsum(ctx, terms, absolute=False, squared=False):
833
+ """
834
+ Calculates a sum containing a finite number of terms (for infinite
835
+ series, see :func:`~mpmath.nsum`). The terms will be converted to
836
+ mpmath numbers. For len(terms) > 2, this function is generally
837
+ faster and produces more accurate results than the builtin
838
+ Python function :func:`sum`.
839
+
840
+ >>> from mpmath import *
841
+ >>> mp.dps = 15; mp.pretty = False
842
+ >>> fsum([1, 2, 0.5, 7])
843
+ mpf('10.5')
844
+
845
+ With squared=True each term is squared, and with absolute=True
846
+ the absolute value of each term is used.
847
+ """
848
+ prec, rnd = ctx._prec_rounding
849
+ real = []
850
+ imag = []
851
+ for term in terms:
852
+ reval = imval = 0
853
+ if hasattr(term, "_mpf_"):
854
+ reval = term._mpf_
855
+ elif hasattr(term, "_mpc_"):
856
+ reval, imval = term._mpc_
857
+ else:
858
+ term = ctx.convert(term)
859
+ if hasattr(term, "_mpf_"):
860
+ reval = term._mpf_
861
+ elif hasattr(term, "_mpc_"):
862
+ reval, imval = term._mpc_
863
+ else:
864
+ raise NotImplementedError
865
+ if imval:
866
+ if squared:
867
+ if absolute:
868
+ real.append(mpf_mul(reval,reval))
869
+ real.append(mpf_mul(imval,imval))
870
+ else:
871
+ reval, imval = mpc_pow_int((reval,imval),2,prec+10)
872
+ real.append(reval)
873
+ imag.append(imval)
874
+ elif absolute:
875
+ real.append(mpc_abs((reval,imval), prec))
876
+ else:
877
+ real.append(reval)
878
+ imag.append(imval)
879
+ else:
880
+ if squared:
881
+ reval = mpf_mul(reval, reval)
882
+ elif absolute:
883
+ reval = mpf_abs(reval)
884
+ real.append(reval)
885
+ s = mpf_sum(real, prec, rnd, absolute)
886
+ if imag:
887
+ s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd)))
888
+ else:
889
+ s = ctx.make_mpf(s)
890
+ return s
891
+
892
+ def fdot(ctx, A, B=None, conjugate=False):
893
+ r"""
894
+ Computes the dot product of the iterables `A` and `B`,
895
+
896
+ .. math ::
897
+
898
+ \sum_{k=0} A_k B_k.
899
+
900
+ Alternatively, :func:`~mpmath.fdot` accepts a single iterable of pairs.
901
+ In other words, ``fdot(A,B)`` and ``fdot(zip(A,B))`` are equivalent.
902
+ The elements are automatically converted to mpmath numbers.
903
+
904
+ With ``conjugate=True``, the elements in the second vector
905
+ will be conjugated:
906
+
907
+ .. math ::
908
+
909
+ \sum_{k=0} A_k \overline{B_k}
910
+
911
+ **Examples**
912
+
913
+ >>> from mpmath import *
914
+ >>> mp.dps = 15; mp.pretty = False
915
+ >>> A = [2, 1.5, 3]
916
+ >>> B = [1, -1, 2]
917
+ >>> fdot(A, B)
918
+ mpf('6.5')
919
+ >>> list(zip(A, B))
920
+ [(2, 1), (1.5, -1), (3, 2)]
921
+ >>> fdot(_)
922
+ mpf('6.5')
923
+ >>> A = [2, 1.5, 3j]
924
+ >>> B = [1+j, 3, -1-j]
925
+ >>> fdot(A, B)
926
+ mpc(real='9.5', imag='-1.0')
927
+ >>> fdot(A, B, conjugate=True)
928
+ mpc(real='3.5', imag='-5.0')
929
+
930
+ """
931
+ if B is not None:
932
+ A = zip(A, B)
933
+ prec, rnd = ctx._prec_rounding
934
+ real = []
935
+ imag = []
936
+ hasattr_ = hasattr
937
+ types = (ctx.mpf, ctx.mpc)
938
+ for a, b in A:
939
+ if type(a) not in types: a = ctx.convert(a)
940
+ if type(b) not in types: b = ctx.convert(b)
941
+ a_real = hasattr_(a, "_mpf_")
942
+ b_real = hasattr_(b, "_mpf_")
943
+ if a_real and b_real:
944
+ real.append(mpf_mul(a._mpf_, b._mpf_))
945
+ continue
946
+ a_complex = hasattr_(a, "_mpc_")
947
+ b_complex = hasattr_(b, "_mpc_")
948
+ if a_real and b_complex:
949
+ aval = a._mpf_
950
+ bre, bim = b._mpc_
951
+ if conjugate:
952
+ bim = mpf_neg(bim)
953
+ real.append(mpf_mul(aval, bre))
954
+ imag.append(mpf_mul(aval, bim))
955
+ elif b_real and a_complex:
956
+ are, aim = a._mpc_
957
+ bval = b._mpf_
958
+ real.append(mpf_mul(are, bval))
959
+ imag.append(mpf_mul(aim, bval))
960
+ elif a_complex and b_complex:
961
+ #re, im = mpc_mul(a._mpc_, b._mpc_, prec+20)
962
+ are, aim = a._mpc_
963
+ bre, bim = b._mpc_
964
+ if conjugate:
965
+ bim = mpf_neg(bim)
966
+ real.append(mpf_mul(are, bre))
967
+ real.append(mpf_neg(mpf_mul(aim, bim)))
968
+ imag.append(mpf_mul(are, bim))
969
+ imag.append(mpf_mul(aim, bre))
970
+ else:
971
+ raise NotImplementedError
972
+ s = mpf_sum(real, prec, rnd)
973
+ if imag:
974
+ s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd)))
975
+ else:
976
+ s = ctx.make_mpf(s)
977
+ return s
978
+
979
+ def _wrap_libmp_function(ctx, mpf_f, mpc_f=None, mpi_f=None, doc="<no doc>"):
980
+ """
981
+ Given a low-level mpf_ function, and optionally similar functions
982
+ for mpc_ and mpi_, defines the function as a context method.
983
+
984
+ It is assumed that the return type is the same as that of
985
+ the input; the exception is that propagation from mpf to mpc is possible
986
+ by raising ComplexResult.
987
+
988
+ """
989
+ def f(x, **kwargs):
990
+ if type(x) not in ctx.types:
991
+ x = ctx.convert(x)
992
+ prec, rounding = ctx._prec_rounding
993
+ if kwargs:
994
+ prec = kwargs.get('prec', prec)
995
+ if 'dps' in kwargs:
996
+ prec = dps_to_prec(kwargs['dps'])
997
+ rounding = kwargs.get('rounding', rounding)
998
+ if hasattr(x, '_mpf_'):
999
+ try:
1000
+ return ctx.make_mpf(mpf_f(x._mpf_, prec, rounding))
1001
+ except ComplexResult:
1002
+ # Handle propagation to complex
1003
+ if ctx.trap_complex:
1004
+ raise
1005
+ return ctx.make_mpc(mpc_f((x._mpf_, fzero), prec, rounding))
1006
+ elif hasattr(x, '_mpc_'):
1007
+ return ctx.make_mpc(mpc_f(x._mpc_, prec, rounding))
1008
+ raise NotImplementedError("%s of a %s" % (name, type(x)))
1009
+ name = mpf_f.__name__[4:]
1010
+ f.__doc__ = function_docs.__dict__.get(name, "Computes the %s of x" % doc)
1011
+ return f
1012
+
1013
+ # Called by SpecialFunctions.__init__()
1014
+ @classmethod
1015
+ def _wrap_specfun(cls, name, f, wrap):
1016
+ if wrap:
1017
+ def f_wrapped(ctx, *args, **kwargs):
1018
+ convert = ctx.convert
1019
+ args = [convert(a) for a in args]
1020
+ prec = ctx.prec
1021
+ try:
1022
+ ctx.prec += 10
1023
+ retval = f(ctx, *args, **kwargs)
1024
+ finally:
1025
+ ctx.prec = prec
1026
+ return +retval
1027
+ else:
1028
+ f_wrapped = f
1029
+ f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__)
1030
+ setattr(cls, name, f_wrapped)
1031
+
1032
+ def _convert_param(ctx, x):
1033
+ if hasattr(x, "_mpc_"):
1034
+ v, im = x._mpc_
1035
+ if im != fzero:
1036
+ return x, 'C'
1037
+ elif hasattr(x, "_mpf_"):
1038
+ v = x._mpf_
1039
+ else:
1040
+ if type(x) in int_types:
1041
+ return int(x), 'Z'
1042
+ p = None
1043
+ if isinstance(x, tuple):
1044
+ p, q = x
1045
+ elif hasattr(x, '_mpq_'):
1046
+ p, q = x._mpq_
1047
+ elif isinstance(x, basestring) and '/' in x:
1048
+ p, q = x.split('/')
1049
+ p = int(p)
1050
+ q = int(q)
1051
+ if p is not None:
1052
+ if not p % q:
1053
+ return p // q, 'Z'
1054
+ return ctx.mpq(p,q), 'Q'
1055
+ x = ctx.convert(x)
1056
+ if hasattr(x, "_mpc_"):
1057
+ v, im = x._mpc_
1058
+ if im != fzero:
1059
+ return x, 'C'
1060
+ elif hasattr(x, "_mpf_"):
1061
+ v = x._mpf_
1062
+ else:
1063
+ return x, 'U'
1064
+ sign, man, exp, bc = v
1065
+ if man:
1066
+ if exp >= -4:
1067
+ if sign:
1068
+ man = -man
1069
+ if exp >= 0:
1070
+ return int(man) << exp, 'Z'
1071
+ if exp >= -4:
1072
+ p, q = int(man), (1<<(-exp))
1073
+ return ctx.mpq(p,q), 'Q'
1074
+ x = ctx.make_mpf(v)
1075
+ return x, 'R'
1076
+ elif not exp:
1077
+ return 0, 'Z'
1078
+ else:
1079
+ return x, 'U'
1080
+
1081
+ def _mpf_mag(ctx, x):
1082
+ sign, man, exp, bc = x
1083
+ if man:
1084
+ return exp+bc
1085
+ if x == fzero:
1086
+ return ctx.ninf
1087
+ if x == finf or x == fninf:
1088
+ return ctx.inf
1089
+ return ctx.nan
1090
+
1091
+ def mag(ctx, x):
1092
+ """
1093
+ Quick logarithmic magnitude estimate of a number. Returns an
1094
+ integer or infinity `m` such that `|x| <= 2^m`. It is not
1095
+ guaranteed that `m` is an optimal bound, but it will never
1096
+ be too large by more than 2 (and probably not more than 1).
1097
+
1098
+ **Examples**
1099
+
1100
+ >>> from mpmath import *
1101
+ >>> mp.pretty = True
1102
+ >>> mag(10), mag(10.0), mag(mpf(10)), int(ceil(log(10,2)))
1103
+ (4, 4, 4, 4)
1104
+ >>> mag(10j), mag(10+10j)
1105
+ (4, 5)
1106
+ >>> mag(0.01), int(ceil(log(0.01,2)))
1107
+ (-6, -6)
1108
+ >>> mag(0), mag(inf), mag(-inf), mag(nan)
1109
+ (-inf, +inf, +inf, nan)
1110
+
1111
+ """
1112
+ if hasattr(x, "_mpf_"):
1113
+ return ctx._mpf_mag(x._mpf_)
1114
+ elif hasattr(x, "_mpc_"):
1115
+ r, i = x._mpc_
1116
+ if r == fzero:
1117
+ return ctx._mpf_mag(i)
1118
+ if i == fzero:
1119
+ return ctx._mpf_mag(r)
1120
+ return 1+max(ctx._mpf_mag(r), ctx._mpf_mag(i))
1121
+ elif isinstance(x, int_types):
1122
+ if x:
1123
+ return bitcount(abs(x))
1124
+ return ctx.ninf
1125
+ elif isinstance(x, rational.mpq):
1126
+ p, q = x._mpq_
1127
+ if p:
1128
+ return 1 + bitcount(abs(p)) - bitcount(q)
1129
+ return ctx.ninf
1130
+ else:
1131
+ x = ctx.convert(x)
1132
+ if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
1133
+ return ctx.mag(x)
1134
+ else:
1135
+ raise TypeError("requires an mpf/mpc")
1136
+
1137
+
1138
+ # Register with "numbers" ABC
1139
+ # We do not subclass, hence we do not use the @abstractmethod checks. While
1140
+ # this is less invasive it may turn out that we do not actually support
1141
+ # parts of the expected interfaces. See
1142
+ # http://docs.python.org/2/library/numbers.html for list of abstract
1143
+ # methods.
1144
+ try:
1145
+ import numbers
1146
+ numbers.Complex.register(_mpc)
1147
+ numbers.Real.register(_mpf)
1148
+ except ImportError:
1149
+ pass
lib/python3.11/site-packages/mpmath/function_docs.py ADDED
The diff for this file is too large to render. See raw diff
 
lib/python3.11/site-packages/mpmath/functions/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from . import functions
2
+ # Hack to update methods
3
+ from . import factorials
4
+ from . import hypergeometric
5
+ from . import expintegrals
6
+ from . import bessel
7
+ from . import orthogonal
8
+ from . import theta
9
+ from . import elliptic
10
+ from . import signals
11
+ from . import zeta
12
+ from . import rszeta
13
+ from . import zetazeros
14
+ from . import qfunctions
lib/python3.11/site-packages/mpmath/functions/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (848 Bytes). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/bessel.cpython-311.pyc ADDED
Binary file (63.1 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/elliptic.cpython-311.pyc ADDED
Binary file (57.8 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-311.pyc ADDED
Binary file (23.7 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/factorials.cpython-311.pyc ADDED
Binary file (10.9 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/functions.cpython-311.pyc ADDED
Binary file (33.4 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-311.pyc ADDED
Binary file (76.8 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-311.pyc ADDED
Binary file (25.8 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-311.pyc ADDED
Binary file (10.2 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/rszeta.cpython-311.pyc ADDED
Binary file (73.9 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/signals.cpython-311.pyc ADDED
Binary file (1.88 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/theta.cpython-311.pyc ADDED
Binary file (53.5 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/zeta.cpython-311.pyc ADDED
Binary file (64.2 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-311.pyc ADDED
Binary file (45.8 kB). View file
 
lib/python3.11/site-packages/mpmath/functions/bessel.py ADDED
@@ -0,0 +1,1108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .functions import defun, defun_wrapped
2
+
3
+ @defun
4
+ def j0(ctx, x):
5
+ """Computes the Bessel function `J_0(x)`. See :func:`~mpmath.besselj`."""
6
+ return ctx.besselj(0, x)
7
+
8
+ @defun
9
+ def j1(ctx, x):
10
+ """Computes the Bessel function `J_1(x)`. See :func:`~mpmath.besselj`."""
11
+ return ctx.besselj(1, x)
12
+
13
+ @defun
14
+ def besselj(ctx, n, z, derivative=0, **kwargs):
15
+ if type(n) is int:
16
+ n_isint = True
17
+ else:
18
+ n = ctx.convert(n)
19
+ n_isint = ctx.isint(n)
20
+ if n_isint:
21
+ n = int(ctx._re(n))
22
+ if n_isint and n < 0:
23
+ return (-1)**n * ctx.besselj(-n, z, derivative, **kwargs)
24
+ z = ctx.convert(z)
25
+ M = ctx.mag(z)
26
+ if derivative:
27
+ d = ctx.convert(derivative)
28
+ # TODO: the integer special-casing shouldn't be necessary.
29
+ # However, the hypergeometric series gets inaccurate for large d
30
+ # because of inaccurate pole cancellation at a pole far from
31
+ # zero (needs to be fixed in hypercomb or hypsum)
32
+ if ctx.isint(d) and d >= 0:
33
+ d = int(d)
34
+ orig = ctx.prec
35
+ try:
36
+ ctx.prec += 15
37
+ v = ctx.fsum((-1)**k * ctx.binomial(d,k) * ctx.besselj(2*k+n-d,z)
38
+ for k in range(d+1))
39
+ finally:
40
+ ctx.prec = orig
41
+ v *= ctx.mpf(2)**(-d)
42
+ else:
43
+ def h(n,d):
44
+ r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), -0.25, exact=True)
45
+ B = [0.5*(n-d+1), 0.5*(n-d+2)]
46
+ T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[],B,[(n+1)*0.5,(n+2)*0.5],B+[n+1],r)]
47
+ return T
48
+ v = ctx.hypercomb(h, [n,d], **kwargs)
49
+ else:
50
+ # Fast case: J_n(x), n int, appropriate magnitude for fixed-point calculation
51
+ if (not derivative) and n_isint and abs(M) < 10 and abs(n) < 20:
52
+ try:
53
+ return ctx._besselj(n, z)
54
+ except NotImplementedError:
55
+ pass
56
+ if not z:
57
+ if not n:
58
+ v = ctx.one + n+z
59
+ elif ctx.re(n) > 0:
60
+ v = n*z
61
+ else:
62
+ v = ctx.inf + z + n
63
+ else:
64
+ #v = 0
65
+ orig = ctx.prec
66
+ try:
67
+ # XXX: workaround for accuracy in low level hypergeometric series
68
+ # when alternating, large arguments
69
+ ctx.prec += min(3*abs(M), ctx.prec)
70
+ w = ctx.fmul(z, 0.5, exact=True)
71
+ def h(n):
72
+ r = ctx.fneg(ctx.fmul(w, w, prec=max(0,ctx.prec+M)), exact=True)
73
+ return [([w], [n], [], [n+1], [], [n+1], r)]
74
+ v = ctx.hypercomb(h, [n], **kwargs)
75
+ finally:
76
+ ctx.prec = orig
77
+ v = +v
78
+ return v
79
+
80
+ @defun
81
+ def besseli(ctx, n, z, derivative=0, **kwargs):
82
+ n = ctx.convert(n)
83
+ z = ctx.convert(z)
84
+ if not z:
85
+ if derivative:
86
+ raise ValueError
87
+ if not n:
88
+ # I(0,0) = 1
89
+ return 1+n+z
90
+ if ctx.isint(n):
91
+ return 0*(n+z)
92
+ r = ctx.re(n)
93
+ if r == 0:
94
+ return ctx.nan*(n+z)
95
+ elif r > 0:
96
+ return 0*(n+z)
97
+ else:
98
+ return ctx.inf+(n+z)
99
+ M = ctx.mag(z)
100
+ if derivative:
101
+ d = ctx.convert(derivative)
102
+ def h(n,d):
103
+ r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), 0.25, exact=True)
104
+ B = [0.5*(n-d+1), 0.5*(n-d+2), n+1]
105
+ T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[n+1],B,[(n+1)*0.5,(n+2)*0.5],B,r)]
106
+ return T
107
+ v = ctx.hypercomb(h, [n,d], **kwargs)
108
+ else:
109
+ def h(n):
110
+ w = ctx.fmul(z, 0.5, exact=True)
111
+ r = ctx.fmul(w, w, prec=max(0,ctx.prec+M))
112
+ return [([w], [n], [], [n+1], [], [n+1], r)]
113
+ v = ctx.hypercomb(h, [n], **kwargs)
114
+ return v
115
+
116
+ @defun_wrapped
117
+ def bessely(ctx, n, z, derivative=0, **kwargs):
118
+ if not z:
119
+ if derivative:
120
+ # Not implemented
121
+ raise ValueError
122
+ if not n:
123
+ # ~ log(z/2)
124
+ return -ctx.inf + (n+z)
125
+ if ctx.im(n):
126
+ return ctx.nan * (n+z)
127
+ r = ctx.re(n)
128
+ q = n+0.5
129
+ if ctx.isint(q):
130
+ if n > 0:
131
+ return -ctx.inf + (n+z)
132
+ else:
133
+ return 0 * (n+z)
134
+ if r < 0 and int(ctx.floor(q)) % 2:
135
+ return ctx.inf + (n+z)
136
+ else:
137
+ return ctx.ninf + (n+z)
138
+ # XXX: use hypercomb
139
+ ctx.prec += 10
140
+ m, d = ctx.nint_distance(n)
141
+ if d < -ctx.prec:
142
+ h = +ctx.eps
143
+ ctx.prec *= 2
144
+ n += h
145
+ elif d < 0:
146
+ ctx.prec -= d
147
+ # TODO: avoid cancellation for imaginary arguments
148
+ cos, sin = ctx.cospi_sinpi(n)
149
+ return (ctx.besselj(n,z,derivative,**kwargs)*cos - \
150
+ ctx.besselj(-n,z,derivative,**kwargs))/sin
151
+
152
+ @defun_wrapped
153
+ def besselk(ctx, n, z, **kwargs):
154
+ if not z:
155
+ return ctx.inf
156
+ M = ctx.mag(z)
157
+ if M < 1:
158
+ # Represent as limit definition
159
+ def h(n):
160
+ r = (z/2)**2
161
+ T1 = [z, 2], [-n, n-1], [n], [], [], [1-n], r
162
+ T2 = [z, 2], [n, -n-1], [-n], [], [], [1+n], r
163
+ return T1, T2
164
+ # We could use the limit definition always, but it leads
165
+ # to very bad cancellation (of exponentially large terms)
166
+ # for large real z
167
+ # Instead represent in terms of 2F0
168
+ else:
169
+ ctx.prec += M
170
+ def h(n):
171
+ return [([ctx.pi/2, z, ctx.exp(-z)], [0.5,-0.5,1], [], [], \
172
+ [n+0.5, 0.5-n], [], -1/(2*z))]
173
+ return ctx.hypercomb(h, [n], **kwargs)
174
+
175
+ @defun_wrapped
176
+ def hankel1(ctx,n,x,**kwargs):
177
+ return ctx.besselj(n,x,**kwargs) + ctx.j*ctx.bessely(n,x,**kwargs)
178
+
179
+ @defun_wrapped
180
+ def hankel2(ctx,n,x,**kwargs):
181
+ return ctx.besselj(n,x,**kwargs) - ctx.j*ctx.bessely(n,x,**kwargs)
182
+
183
+ @defun_wrapped
184
+ def whitm(ctx,k,m,z,**kwargs):
185
+ if z == 0:
186
+ # M(k,m,z) = 0^(1/2+m)
187
+ if ctx.re(m) > -0.5:
188
+ return z
189
+ elif ctx.re(m) < -0.5:
190
+ return ctx.inf + z
191
+ else:
192
+ return ctx.nan * z
193
+ x = ctx.fmul(-0.5, z, exact=True)
194
+ y = 0.5+m
195
+ return ctx.exp(x) * z**y * ctx.hyp1f1(y-k, 1+2*m, z, **kwargs)
196
+
197
+ @defun_wrapped
198
+ def whitw(ctx,k,m,z,**kwargs):
199
+ if z == 0:
200
+ g = abs(ctx.re(m))
201
+ if g < 0.5:
202
+ return z
203
+ elif g > 0.5:
204
+ return ctx.inf + z
205
+ else:
206
+ return ctx.nan * z
207
+ x = ctx.fmul(-0.5, z, exact=True)
208
+ y = 0.5+m
209
+ return ctx.exp(x) * z**y * ctx.hyperu(y-k, 1+2*m, z, **kwargs)
210
+
211
+ @defun
212
+ def hyperu(ctx, a, b, z, **kwargs):
213
+ a, atype = ctx._convert_param(a)
214
+ b, btype = ctx._convert_param(b)
215
+ z = ctx.convert(z)
216
+ if not z:
217
+ if ctx.re(b) <= 1:
218
+ return ctx.gammaprod([1-b],[a-b+1])
219
+ else:
220
+ return ctx.inf + z
221
+ bb = 1+a-b
222
+ bb, bbtype = ctx._convert_param(bb)
223
+ try:
224
+ orig = ctx.prec
225
+ try:
226
+ ctx.prec += 10
227
+ v = ctx.hypsum(2, 0, (atype, bbtype), [a, bb], -1/z, maxterms=ctx.prec)
228
+ return v / z**a
229
+ finally:
230
+ ctx.prec = orig
231
+ except ctx.NoConvergence:
232
+ pass
233
+ def h(a,b):
234
+ w = ctx.sinpi(b)
235
+ T1 = ([ctx.pi,w],[1,-1],[],[a-b+1,b],[a],[b],z)
236
+ T2 = ([-ctx.pi,w,z],[1,-1,1-b],[],[a,2-b],[a-b+1],[2-b],z)
237
+ return T1, T2
238
+ return ctx.hypercomb(h, [a,b], **kwargs)
239
+
240
+ @defun
241
+ def struveh(ctx,n,z, **kwargs):
242
+ n = ctx.convert(n)
243
+ z = ctx.convert(z)
244
+ # http://functions.wolfram.com/Bessel-TypeFunctions/StruveH/26/01/02/
245
+ def h(n):
246
+ return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], -(z/2)**2)]
247
+ return ctx.hypercomb(h, [n], **kwargs)
248
+
249
+ @defun
250
+ def struvel(ctx,n,z, **kwargs):
251
+ n = ctx.convert(n)
252
+ z = ctx.convert(z)
253
+ # http://functions.wolfram.com/Bessel-TypeFunctions/StruveL/26/01/02/
254
+ def h(n):
255
+ return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], (z/2)**2)]
256
+ return ctx.hypercomb(h, [n], **kwargs)
257
+
258
+ def _anger(ctx,which,v,z,**kwargs):
259
+ v = ctx._convert_param(v)[0]
260
+ z = ctx.convert(z)
261
+ def h(v):
262
+ b = ctx.mpq_1_2
263
+ u = v*b
264
+ m = b*3
265
+ a1,a2,b1,b2 = m-u, m+u, 1-u, 1+u
266
+ c, s = ctx.cospi_sinpi(u)
267
+ if which == 0:
268
+ A, B = [b*z, s], [c]
269
+ if which == 1:
270
+ A, B = [b*z, -c], [s]
271
+ w = ctx.square_exp_arg(z, mult=-0.25)
272
+ T1 = A, [1, 1], [], [a1,a2], [1], [a1,a2], w
273
+ T2 = B, [1], [], [b1,b2], [1], [b1,b2], w
274
+ return T1, T2
275
+ return ctx.hypercomb(h, [v], **kwargs)
276
+
277
+ @defun
278
+ def angerj(ctx, v, z, **kwargs):
279
+ return _anger(ctx, 0, v, z, **kwargs)
280
+
281
+ @defun
282
+ def webere(ctx, v, z, **kwargs):
283
+ return _anger(ctx, 1, v, z, **kwargs)
284
+
285
+ @defun
286
+ def lommels1(ctx, u, v, z, **kwargs):
287
+ u = ctx._convert_param(u)[0]
288
+ v = ctx._convert_param(v)[0]
289
+ z = ctx.convert(z)
290
+ def h(u,v):
291
+ b = ctx.mpq_1_2
292
+ w = ctx.square_exp_arg(z, mult=-0.25)
293
+ return ([u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], \
294
+ [b*(u-v+3),b*(u+v+3)], w),
295
+ return ctx.hypercomb(h, [u,v], **kwargs)
296
+
297
+ @defun
298
+ def lommels2(ctx, u, v, z, **kwargs):
299
+ u = ctx._convert_param(u)[0]
300
+ v = ctx._convert_param(v)[0]
301
+ z = ctx.convert(z)
302
+ # Asymptotic expansion (GR p. 947) -- need to be careful
303
+ # not to use for small arguments
304
+ # def h(u,v):
305
+ # b = ctx.mpq_1_2
306
+ # w = -(z/2)**(-2)
307
+ # return ([z], [u-1], [], [], [b*(1-u+v)], [b*(1-u-v)], w),
308
+ def h(u,v):
309
+ b = ctx.mpq_1_2
310
+ w = ctx.square_exp_arg(z, mult=-0.25)
311
+ T1 = [u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], [b*(u-v+3),b*(u+v+3)], w
312
+ T2 = [2, z], [u+v-1, -v], [v, b*(u+v+1)], [b*(v-u+1)], [], [1-v], w
313
+ T3 = [2, z], [u-v-1, v], [-v, b*(u-v+1)], [b*(1-u-v)], [], [1+v], w
314
+ #c1 = ctx.cospi((u-v)*b)
315
+ #c2 = ctx.cospi((u+v)*b)
316
+ #s = ctx.sinpi(v)
317
+ #r1 = (u-v+1)*b
318
+ #r2 = (u+v+1)*b
319
+ #T2 = [c1, s, z, 2], [1, -1, -v, v], [], [-v+1], [], [-v+1], w
320
+ #T3 = [-c2, s, z, 2], [1, -1, v, -v], [], [v+1], [], [v+1], w
321
+ #T2 = [c1, s, z, 2], [1, -1, -v, v+u-1], [r1, r2], [-v+1], [], [-v+1], w
322
+ #T3 = [-c2, s, z, 2], [1, -1, v, -v+u-1], [r1, r2], [v+1], [], [v+1], w
323
+ return T1, T2, T3
324
+ return ctx.hypercomb(h, [u,v], **kwargs)
325
+
326
+ @defun
327
+ def ber(ctx, n, z, **kwargs):
328
+ n = ctx.convert(n)
329
+ z = ctx.convert(z)
330
+ # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBer2/26/01/02/0001/
331
+ def h(n):
332
+ r = -(z/4)**4
333
+ cos, sin = ctx.cospi_sinpi(-0.75*n)
334
+ T1 = [cos, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
335
+ T2 = [sin, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
336
+ return T1, T2
337
+ return ctx.hypercomb(h, [n], **kwargs)
338
+
339
+ @defun
340
+ def bei(ctx, n, z, **kwargs):
341
+ n = ctx.convert(n)
342
+ z = ctx.convert(z)
343
+ # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBei2/26/01/02/0001/
344
+ def h(n):
345
+ r = -(z/4)**4
346
+ cos, sin = ctx.cospi_sinpi(0.75*n)
347
+ T1 = [cos, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
348
+ T2 = [sin, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
349
+ return T1, T2
350
+ return ctx.hypercomb(h, [n], **kwargs)
351
+
352
+ @defun
353
+ def ker(ctx, n, z, **kwargs):
354
+ n = ctx.convert(n)
355
+ z = ctx.convert(z)
356
+ # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKer2/26/01/02/0001/
357
+ def h(n):
358
+ r = -(z/4)**4
359
+ cos1, sin1 = ctx.cospi_sinpi(0.25*n)
360
+ cos2, sin2 = ctx.cospi_sinpi(0.75*n)
361
+ T1 = [2, z, 4*cos1], [-n-3, n, 1], [-n], [], [], [0.5, 0.5*(1+n), 0.5*(n+2)], r
362
+ T2 = [2, z, -sin1], [-n-3, 2+n, 1], [-n-1], [], [], [1.5, 0.5*(3+n), 0.5*(n+2)], r
363
+ T3 = [2, z, 4*cos2], [n-3, -n, 1], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
364
+ T4 = [2, z, -sin2], [n-3, 2-n, 1], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
365
+ return T1, T2, T3, T4
366
+ return ctx.hypercomb(h, [n], **kwargs)
367
+
368
+ @defun
369
+ def kei(ctx, n, z, **kwargs):
370
+ n = ctx.convert(n)
371
+ z = ctx.convert(z)
372
+ # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKei2/26/01/02/0001/
373
+ def h(n):
374
+ r = -(z/4)**4
375
+ cos1, sin1 = ctx.cospi_sinpi(0.75*n)
376
+ cos2, sin2 = ctx.cospi_sinpi(0.25*n)
377
+ T1 = [-cos1, 2, z], [1, n-3, 2-n], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
378
+ T2 = [-sin1, 2, z], [1, n-1, -n], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
379
+ T3 = [-sin2, 2, z], [1, -n-1, n], [-n], [], [], [0.5, 0.5*(n+1), 0.5*(n+2)], r
380
+ T4 = [-cos2, 2, z], [1, -n-3, n+2], [-n-1], [], [], [1.5, 0.5*(n+3), 0.5*(n+2)], r
381
+ return T1, T2, T3, T4
382
+ return ctx.hypercomb(h, [n], **kwargs)
383
+
384
+ # TODO: do this more generically?
385
+ def c_memo(f):
386
+ name = f.__name__
387
+ def f_wrapped(ctx):
388
+ cache = ctx._misc_const_cache
389
+ prec = ctx.prec
390
+ p,v = cache.get(name, (-1,0))
391
+ if p >= prec:
392
+ return +v
393
+ else:
394
+ cache[name] = (prec, f(ctx))
395
+ return cache[name][1]
396
+ return f_wrapped
397
+
398
@c_memo
def _airyai_C1(ctx):
    # Ai(0) = 3^(-2/3)/gamma(2/3)  (DLMF 9.2.3)
    return 1 / (ctx.cbrt(9) * ctx.gamma(ctx.mpf(2)/3))

@c_memo
def _airyai_C2(ctx):
    # Ai'(0) = -3^(-1/3)/gamma(1/3)  (DLMF 9.2.4)
    return -1 / (ctx.cbrt(3) * ctx.gamma(ctx.mpf(1)/3))

@c_memo
def _airybi_C1(ctx):
    # Bi(0) = 3^(-1/6)/gamma(2/3)  (DLMF 9.2.5)
    return 1 / (ctx.nthroot(3,6) * ctx.gamma(ctx.mpf(2)/3))

@c_memo
def _airybi_C2(ctx):
    # Bi'(0) = 3^(1/6)/gamma(1/3)  (DLMF 9.2.6)
    return ctx.nthroot(3,6) / ctx.gamma(ctx.mpf(1)/3)
413
+
414
def _airybi_n2_inf(ctx):
    # Limit value 3^(2/3)*gamma(2/3)/(2*pi) used by airybi for the
    # second antiderivative (derivative=-2) at z = -inf.
    # NOTE(review): the try/finally restores ctx.prec although nothing
    # in the body changes it — siblings bump ctx.prec first; possibly a
    # missing `ctx.prec += 10` here. Confirm before changing.
    prec = ctx.prec
    try:
        v = ctx.power(3,'2/3')*ctx.gamma('2/3')/(2*ctx.pi)
    finally:
        ctx.prec = prec
    # Unary plus rounds to the (restored) working precision.
    return +v
421
+
422
# Derivatives at z = 0
# TODO: could be expressed more elegantly using triple factorials
def _airyderiv_0(ctx, z, n, ntype, which):
    """
    Value of the n-th derivative of Ai (which == 0) or Bi (which != 0)
    at z = 0. Only integer derivative orders (ntype == 'Z') are
    supported; for other order types the limit is not implemented.

    ``z`` is passed through so that the result inherits its type
    (it is added back as ``+ z`` where z == 0).
    """
    if ntype == 'Z':
        if n < 0:
            # Antiderivatives at 0 vanish; return z (== 0) to keep type.
            return z
        r = ctx.mpq_1_3
        prec = ctx.prec
        try:
            ctx.prec += 10
            v = ctx.gamma((n+1)*r) * ctx.power(3,n*r) / ctx.pi
            if which == 0:
                v *= ctx.sinpi(2*(n+1)*r)
                v /= ctx.power(3,'2/3')
            else:
                v *= abs(ctx.sinpi(2*(n+1)*r))
                v /= ctx.power(3,'1/6')
        finally:
            ctx.prec = prec
        # +v rounds to the restored precision; + z preserves z's type.
        return +v + z
    else:
        # singular (does the limit exist?)
        raise NotImplementedError
445
+
446
@defun
def airyai(ctx, z, derivative=0, **kwargs):
    """
    Airy function Ai(z), or its ``derivative``-th derivative
    (negative ``derivative`` gives antiderivatives). Handles values at
    infinities, derivative values at z = 0, and switches between a
    hypergeometric series and a direct asymptotic expansion for
    Re(z) > 4 to avoid catastrophic cancellation.
    """
    z = ctx.convert(z)
    if derivative:
        n, ntype = ctx._convert_param(derivative)
    else:
        n = 0
    # Values at infinities
    if not ctx.isnormal(z) and z:
        if n and ntype == 'Z':
            if n == -1:
                if z == ctx.inf:
                    # First antiderivative tends to 1/3 at +inf.
                    return ctx.mpf(1)/3 + 1/z
                if z == ctx.ninf:
                    return ctx.mpf(-2)/3 + 1/z
            if n < -1:
                if z == ctx.inf:
                    return z
                if z == ctx.ninf:
                    return (-1)**n * (-z)
        # NOTE(review): parses as ((not n) and z == inf) or (z == ninf)
        # — so any derivative order falls through here for z == -inf.
        # Confirm against upstream intent before changing precedence.
        if (not n) and z == ctx.inf or z == ctx.ninf:
            return 1/z
        # TODO: limits
        raise ValueError("essential singularity of Ai(z)")
    # Account for exponential scaling
    if z:
        extraprec = max(0, int(1.5*ctx.mag(z)))
    else:
        extraprec = 0
    if n:
        if n == 1:
            def h():
                # http://functions.wolfram.com/03.07.06.0005.01
                if ctx._re(z) > 4:
                    ctx.prec += extraprec
                    w = z**1.5; r = -0.75/w; u = -2*w/3
                    ctx.prec -= extraprec
                    C = -ctx.exp(u)/(2*ctx.sqrt(ctx.pi))*ctx.nthroot(z,4)
                    return ([C],[1],[],[],[(-1,6),(7,6)],[],r),
                # http://functions.wolfram.com/03.07.26.0001.01
                else:
                    ctx.prec += extraprec
                    w = z**3 / 9
                    ctx.prec -= extraprec
                    C1 = _airyai_C1(ctx) * 0.5
                    C2 = _airyai_C2(ctx)
                    T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
                    T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
                    return T1, T2
            return ctx.hypercomb(h, [], **kwargs)
        else:
            if z == 0:
                return _airyderiv_0(ctx, z, n, ntype, 0)
            # http://functions.wolfram.com/03.05.20.0004.01
            def h(n):
                ctx.prec += extraprec
                w = z**3/9
                ctx.prec -= extraprec
                q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
                a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
                T1 = [3, z], [n-q23, -n], [a1], [b1,b2,b3], \
                    [a1,a2], [b1,b2,b3], w
                a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
                T2 = [3, z, -z], [n-q43, -n, 1], [a1], [b1,b2,b3], \
                    [a1,a2], [b1,b2,b3], w
                return T1, T2
            v = ctx.hypercomb(h, [n], **kwargs)
            # For real z and integer order, the result must be real;
            # discard a spurious imaginary part from perturbation.
            if ctx._is_real_type(z) and ctx.isint(n):
                v = ctx._re(v)
            return v
    else:
        def h():
            if ctx._re(z) > 4:
                # We could use 1F1, but it results in huge cancellation;
                # the following expansion is better.
                # TODO: asymptotic series for derivatives
                ctx.prec += extraprec
                w = z**1.5; r = -0.75/w; u = -2*w/3
                ctx.prec -= extraprec
                C = ctx.exp(u)/(2*ctx.sqrt(ctx.pi)*ctx.nthroot(z,4))
                return ([C],[1],[],[],[(1,6),(5,6)],[],r),
            else:
                ctx.prec += extraprec
                w = z**3 / 9
                ctx.prec -= extraprec
                C1 = _airyai_C1(ctx)
                C2 = _airyai_C2(ctx)
                # Maclaurin-type split Ai(z) = C1*f(w) + C2*z*g(w).
                T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
                T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
                return T1, T2
        return ctx.hypercomb(h, [], **kwargs)
537
+
538
@defun
def airybi(ctx, z, derivative=0, **kwargs):
    """
    Airy function Bi(z), or its ``derivative``-th derivative
    (negative ``derivative`` gives antiderivatives). Handles values at
    infinities and derivative values at z = 0; the generic case is a
    hypergeometric combination evaluated with extra precision scaled
    by mag(z) to absorb exponential growth.
    """
    z = ctx.convert(z)
    if derivative:
        n, ntype = ctx._convert_param(derivative)
    else:
        n = 0
    # Values at infinities
    if not ctx.isnormal(z) and z:
        if n and ntype == 'Z':
            if z == ctx.inf:
                return z
            if z == ctx.ninf:
                if n == -1:
                    return 1/z
                if n == -2:
                    # Finite limit 3^(2/3)*gamma(2/3)/(2*pi).
                    return _airybi_n2_inf(ctx)
                if n < -2:
                    return (-1)**n * (-z)
        if not n:
            if z == ctx.inf:
                return z
            if z == ctx.ninf:
                return 1/z
        # TODO: limits
        raise ValueError("essential singularity of Bi(z)")
    # Extra precision to account for exponential scaling.
    if z:
        extraprec = max(0, int(1.5*ctx.mag(z)))
    else:
        extraprec = 0
    if n:
        if n == 1:
            # http://functions.wolfram.com/03.08.26.0001.01
            def h():
                ctx.prec += extraprec
                w = z**3 / 9
                ctx.prec -= extraprec
                C1 = _airybi_C1(ctx)*0.5
                C2 = _airybi_C2(ctx)
                T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
                T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
                return T1, T2
            return ctx.hypercomb(h, [], **kwargs)
        else:
            if z == 0:
                return _airyderiv_0(ctx, z, n, ntype, 1)
            def h(n):
                ctx.prec += extraprec
                w = z**3/9
                ctx.prec -= extraprec
                q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
                q16 = ctx.mpq_1_6
                q56 = ctx.mpq_5_6
                a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
                T1 = [3, z], [n-q16, -n], [a1], [b1,b2,b3], \
                    [a1,a2], [b1,b2,b3], w
                a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
                T2 = [3, z], [n-q56, 1-n], [a1], [b1,b2,b3], \
                    [a1,a2], [b1,b2,b3], w
                return T1, T2
            v = ctx.hypercomb(h, [n], **kwargs)
            # For real z and integer order, discard a spurious imaginary
            # part introduced by the perturbation of n.
            if ctx._is_real_type(z) and ctx.isint(n):
                v = ctx._re(v)
            return v
    else:
        def h():
            ctx.prec += extraprec
            w = z**3 / 9
            ctx.prec -= extraprec
            C1 = _airybi_C1(ctx)
            C2 = _airybi_C2(ctx)
            # Maclaurin-type split Bi(z) = C1*f(w) + C2*z*g(w).
            T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
            T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
            return T1, T2
        return ctx.hypercomb(h, [], **kwargs)
613
+
614
def _airy_zero(ctx, which, k, derivative, complex=False):
    """
    k-th zero of Ai (which == 0) or Bi (which == 1), or of the
    respective first derivative when ``derivative`` is 1. With
    ``complex=True`` (Bi only), locates the zero on the complex ray
    exp(i*pi/3) instead of the negative real axis.

    An asymptotic estimate seeds ctx.findroot.
    """
    # Asymptotic formulas are given in DLMF section 9.9
    def U(t): return t**(2/3.)*(1-7/(t**2*48))
    def T(t): return t**(2/3.)*(1+5/(t**2*48))
    k = int(k)
    if k < 1:
        raise ValueError("k cannot be less than 1")
    if not derivative in (0,1):
        raise ValueError("Derivative should lie between 0 and 1")
    if which == 0:
        if derivative:
            return ctx.findroot(lambda z: ctx.airyai(z,1),
                -U(3*ctx.pi*(4*k-3)/8))
        return ctx.findroot(ctx.airyai, -T(3*ctx.pi*(4*k-1)/8))
    if which == 1 and complex == False:
        if derivative:
            return ctx.findroot(lambda z: ctx.airybi(z,1),
                -U(3*ctx.pi*(4*k-1)/8))
        return ctx.findroot(ctx.airybi, -T(3*ctx.pi*(4*k-3)/8))
    if which == 1 and complex == True:
        if derivative:
            t = 3*ctx.pi*(4*k-3)/8 + 0.75j*ctx.ln2
            s = ctx.expjpi(ctx.mpf(1)/3) * T(t)
            return ctx.findroot(lambda z: ctx.airybi(z,1), s)
        t = 3*ctx.pi*(4*k-1)/8 + 0.75j*ctx.ln2
        s = ctx.expjpi(ctx.mpf(1)/3) * U(t)
        return ctx.findroot(ctx.airybi, s)
641
+
642
@defun
def airyaizero(ctx, k, derivative=0):
    """k-th real zero of Ai(z) (or of Ai'(z) when derivative=1)."""
    return _airy_zero(ctx, 0, k, derivative, False)
645
+
646
@defun
def airybizero(ctx, k, derivative=0, complex=False):
    """k-th zero of Bi(z) (or Bi'(z) when derivative=1); with
    complex=True, the k-th zero on the complex ray instead of the
    negative real axis."""
    return _airy_zero(ctx, 1, k, derivative, complex)
649
+
650
def _scorer(ctx, z, which, kwargs):
    """
    Common implementation of the Scorer functions Gi (which == 0) and
    Hi (which == 1). Tries a direct asymptotic series for large |z| in
    the appropriate sector (avoiding exponentially large cancellation),
    otherwise falls back to a representation in terms of airybi plus a
    hypergeometric term.
    """
    z = ctx.convert(z)
    if ctx.isinf(z):
        if z == ctx.inf:
            # Gi decays at +inf; Hi grows.
            if which == 0: return 1/z
            if which == 1: return z
        if z == ctx.ninf:
            return 1/z
        raise ValueError("essential singularity")
    if z:
        extraprec = max(0, int(1.5*ctx.mag(z)))
    else:
        extraprec = 0
    if kwargs.get('derivative'):
        raise NotImplementedError
    # Direct asymptotic expansions, to avoid
    # exponentially large cancellation
    try:
        if ctx.mag(z) > 3:
            # 0.999 factors keep slightly inside the sectors of validity.
            if which == 0 and abs(ctx.arg(z)) < ctx.pi/3 * 0.999:
                def h():
                    return (([ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
                return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
            if which == 1 and abs(ctx.arg(-z)) < 2*ctx.pi/3 * 0.999:
                def h():
                    return (([-ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
                return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
    except ctx.NoConvergence:
        # Fall through to the Bi-based representation.
        pass
    def h():
        A = ctx.airybi(z, **kwargs)/3
        B = -2*ctx.pi
        if which == 1:
            A *= 2
            B *= -1
        ctx.prec += extraprec
        w = z**3/9
        ctx.prec -= extraprec
        T1 = [A], [1], [], [], [], [], 0
        T2 = [B,z], [-1,2], [], [], [1], [ctx.mpq_4_3,ctx.mpq_5_3], w
        return T1, T2
    return ctx.hypercomb(h, [], **kwargs)
692
+
693
@defun
def scorergi(ctx, z, **kwargs):
    """Scorer function Gi(z); see _scorer for the implementation."""
    return _scorer(ctx, z, 0, kwargs)
696
+
697
@defun
def scorerhi(ctx, z, **kwargs):
    """Scorer function Hi(z); see _scorer for the implementation."""
    return _scorer(ctx, z, 1, kwargs)
700
+
701
@defun_wrapped
def coulombc(ctx, l, eta, _cache={}):
    """
    Normalization constant C_l(eta) for the Coulomb wave functions.
    The mutable default ``_cache`` is an intentional per-process memo,
    keyed by (l, eta) and tagged with the precision it was computed at.
    """
    if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
        # Cached at sufficient precision; round to current precision.
        return +_cache[l,eta][1]
    # Computed via log-gammas to avoid overflow of the gamma factors.
    G3 = ctx.loggamma(2*l+2)
    G1 = ctx.loggamma(1+l+ctx.j*eta)
    G2 = ctx.loggamma(1+l-ctx.j*eta)
    v = 2**l * ctx.exp((-ctx.pi*eta+G1+G2)/2 - G3)
    if not (ctx.im(l) or ctx.im(eta)):
        # Real parameters give a real constant; drop rounding noise.
        v = ctx.re(v)
    _cache[l,eta] = (ctx.prec, v)
    return v
713
+
714
@defun_wrapped
def coulombf(ctx, l, eta, z, w=1, chop=True, **kwargs):
    """
    Regular Coulomb wave function F_l(eta, z).

    ``w`` can be either 1 or -1; the other may be better in some cases.
    With ``chop=True`` a residual imaginary part is discarded for real
    parameters with z >= 0.
    TODO: check that chop=True chops when and only when it should.
    """
    # Regular Coulomb wave function
    # Note: w can be either 1 or -1; the other may be better in some cases
    # TODO: check that chop=True chops when and only when it should
    #ctx.prec += 10
    def h(l, eta):
        try:
            jw = ctx.j*w
            # Exact products keep the 1F1 argument free of rounding.
            jwz = ctx.fmul(jw, z, exact=True)
            jwz2 = ctx.fmul(jwz, -2, exact=True)
            C = ctx.coulombc(l, eta)
            T1 = [C, z, ctx.exp(jwz)], [1, l+1, 1], [], [], [1+l+jw*eta], \
                [2*l+2], jwz2
        except ValueError:
            # Degenerate parameters: contribute a zero term so that
            # hypercomb's perturbation machinery can retry.
            T1 = [0], [-1], [], [], [], [], 0
        return (T1,)
    v = ctx.hypercomb(h, [l,eta], **kwargs)
    if chop and (not ctx.im(l)) and (not ctx.im(eta)) and (not ctx.im(z)) and \
        (ctx.re(z) >= 0):
        v = ctx.re(v)
    return v
736
+
737
@defun_wrapped
def _coulomb_chi(ctx, l, eta, _cache={}):
    """
    Phase-like quantity chi(l, eta) used by coulombg, evaluated as an
    accurately-summed combination of log-gammas. The mutable default
    ``_cache`` is an intentional per-process memo keyed by (l, eta).
    """
    if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
        return _cache[l,eta][1]
    def terms():
        l2 = -l-1
        jeta = ctx.j*eta
        return [ctx.loggamma(1+l+jeta) * (-0.5j),
            ctx.loggamma(1+l-jeta) * (0.5j),
            ctx.loggamma(1+l2+jeta) * (0.5j),
            ctx.loggamma(1+l2-jeta) * (-0.5j),
            -(l+0.5)*ctx.pi]
    # sum_accurately re-evaluates at higher precision on cancellation.
    v = ctx.sum_accurately(terms, 1)
    _cache[l,eta] = (ctx.prec, v)
    return v
752
+
753
@defun_wrapped
def coulombg(ctx, l, eta, z, w=1, chop=True, **kwargs):
    """
    Irregular Coulomb wave function G_l(eta, z).

    ``w`` can be either 1 or -1; the other may be better in some cases.
    With ``chop=True`` a residual imaginary part is discarded for real
    parameters with z >= 0.
    TODO: check that chop=True chops when and only when it should.
    """
    # Irregular Coulomb wave function
    # Note: w can be either 1 or -1; the other may be better in some cases
    # TODO: check that chop=True chops when and only when it should
    if not ctx._im(l):
        l = ctx._re(l) # XXX: for isint
    def h(l, eta):
        # Force perturbation for integers and half-integers
        if ctx.isint(l*2):
            # Zero term triggers hypercomb's parameter perturbation.
            T1 = [0], [-1], [], [], [], [], 0
            return (T1,)
        l2 = -l-1
        try:
            chi = ctx._coulomb_chi(l, eta)
            jw = ctx.j*w
            s = ctx.sin(chi); c = ctx.cos(chi)
            C1 = ctx.coulombc(l,eta)
            C2 = ctx.coulombc(l2,eta)
            u = ctx.exp(jw*z)
            x = -2*jw*z
            T1 = [s, C1, z, u, c], [-1, 1, l+1, 1, 1], [], [], \
                [1+l+jw*eta], [2*l+2], x
            T2 = [-s, C2, z, u], [-1, 1, l2+1, 1], [], [], \
                [1+l2+jw*eta], [2*l2+2], x
            return T1, T2
        except ValueError:
            T1 = [0], [-1], [], [], [], [], 0
            return (T1,)
    v = ctx.hypercomb(h, [l,eta], **kwargs)
    if chop and (not ctx._im(l)) and (not ctx._im(eta)) and (not ctx._im(z)) and \
        (ctx._re(z) >= 0):
        v = ctx._re(v)
    return v
787
+
788
def mcmahon(ctx, kind, prime, v, m):
    """
    Compute an estimate for the location of the Bessel function zero
    j_{v,m}, y_{v,m}, j'_{v,m} or y'_{v,m} using McMahon's asymptotic
    expansion (Abramowitz & Stegun 9.5.12-13, DLMF 10.21(vi)).

    Returns (r, err) where r is the estimated location of the root
    and err is a positive number estimating the error of the
    asymptotic expansion.
    """
    u = 4*v**2
    # Leading-term phase: the kind (J vs Y) and the derivative flag
    # select between the offsets -1 and -3 in (4m + 2v + offset)*pi/4.
    if kind == 1:
        b = ((4*m+2*v-3) if prime else (4*m+2*v-1))*ctx.pi/4
    else:
        b = ((4*m+2*v-1) if prime else (4*m+2*v-3))*ctx.pi/4
    w = 8*b
    # Correction terms of the asymptotic series.
    if prime:
        terms = [b,
            -(u+3)/w,
            -4*(7*u**2+82*u-9)/(3*w**3),
            -32*(83*u**3+2075*u**2-3039*u+3537)/(15*w**5),
            -64*(6949*u**4+296492*u**3-1248002*u**2+7414380*u-5853627)/(105*w**7)]
    else:
        terms = [b,
            -(u-1)/w,
            -4*(u-1)*(7*u-31)/(3*w**3),
            -32*(u-1)*(83*u**2-982*u+3779)/(15*w**5),
            -64*(u-1)*(6949*u**3-153855*u**2+1585743*u-6277237)/(105*w**7)]
    # Add each correction only while it is still shrinking in magnitude
    # (the series is asymptotic, not convergent); the magnitude of the
    # final term serves as the error estimate.
    estimate = terms[0]
    err = 0.0
    for previous, term in zip(terms, terms[1:]):
        if abs(term) < abs(previous):
            estimate += term
        else:
            err = abs(term)
    err = abs(terms[-1])
    return estimate, err
826
+
827
def generalized_bisection(ctx, f, a, b, n):
    """
    Given f known to have exactly n simple roots within [a,b],
    return a list of n intervals isolating the roots
    and having opposite signs at the endpoints.

    TODO: this can be optimized, e.g. by reusing evaluation points.
    """
    if n < 1:
        raise ValueError("n cannot be less than 1")
    num_points = n + 1
    while True:
        # Sample f on a uniform grid and record the sign of each sample.
        grid = ctx.linspace(a, b, num_points)
        signs = [ctx.sign(f(x)) for x in grid]
        # A strict sign change between neighbours brackets exactly one root.
        neighbours = zip(grid, grid[1:], signs, signs[1:])
        brackets = [(x0, x1) for x0, x1, s0, s1 in neighbours if s0*s1 == -1]
        if len(brackets) == n:
            return brackets
        # Not every root isolated yet (or a sample hit a root): refine.
        num_points *= 2
848
+
849
def find_in_interval(ctx, f, ab):
    # Refine a sign-change bracket to a root; the Illinois solver only
    # needs f to change sign on ab, and verify=False skips the residual
    # check (the bracket already guarantees a root).
    return ctx.findroot(f, ab, solver='illinois', verify=False)
851
+
852
def bessel_zero(ctx, kind, prime, v, m, isoltol=0.01, _interval_cache={}):
    """
    m-th positive zero of J_v / Y_v (kind 1 / 2) or of the respective
    derivative (prime=1). Strategy: seed with McMahon's asymptotic
    estimate when its error bound is below ``isoltol``; otherwise
    isolate all zeros up to index m by bisection and cache the
    isolating intervals. The mutable default ``_interval_cache`` is an
    intentional per-process memo keyed by (kind, prime, v, m).
    """
    prec = ctx.prec
    # Work at enough precision for both the target and the magnitudes
    # of v and m; restored in the finally block.
    workprec = max(prec, ctx.mag(v), ctx.mag(m))+10
    try:
        ctx.prec = workprec
        v = ctx.mpf(v)
        m = int(m)
        prime = int(prime)
        if v < 0:
            raise ValueError("v cannot be negative")
        if m < 1:
            raise ValueError("m cannot be less than 1")
        if not prime in (0,1):
            raise ValueError("prime should lie between 0 and 1")
        if kind == 1:
            if prime: f = lambda x: ctx.besselj(v,x,derivative=1)
            else: f = lambda x: ctx.besselj(v,x)
        if kind == 2:
            if prime: f = lambda x: ctx.bessely(v,x,derivative=1)
            else: f = lambda x: ctx.bessely(v,x)
        # The first root of J' is very close to 0 for small
        # orders, and this needs to be special-cased
        if kind == 1 and prime and m == 1:
            if v == 0:
                # j'_{0,1} = 0 by convention.
                return ctx.zero
            if v <= 1:
                # TODO: use v <= j'_{v,1} < y_{v,1}?
                r = 2*ctx.sqrt(v*(1+v)/(v+2))
                return find_in_interval(ctx, f, (r/10, 2*r))
        if (kind,prime,v,m) in _interval_cache:
            return find_in_interval(ctx, f, _interval_cache[kind,prime,v,m])
        r, err = mcmahon(ctx, kind, prime, v, m)
        if err < isoltol:
            # Asymptotic estimate already isolates the root.
            return find_in_interval(ctx, f, (r-isoltol, r+isoltol))
        # An x such that 0 < x < r_{v,1}
        if kind == 1 and not prime: low = 2.4
        if kind == 1 and prime: low = 1.8
        if kind == 2 and not prime: low = 0.8
        if kind == 2 and prime: low = 2.0
        # Find an index n >= m+1 where McMahon is reliable, bisect all
        # n zeros below its estimate, and cache every isolating interval.
        n = m+1
        while 1:
            r1, err = mcmahon(ctx, kind, prime, v, n)
            if err < isoltol:
                r2, err2 = mcmahon(ctx, kind, prime, v, n+1)
                intervals = generalized_bisection(ctx, f, low, 0.5*(r1+r2), n)
                for k, ab in enumerate(intervals):
                    _interval_cache[kind,prime,v,k+1] = ab
                return find_in_interval(ctx, f, intervals[m-1])
            else:
                n = n*2
    finally:
        ctx.prec = prec
904
+
905
@defun
def besseljzero(ctx, v, m, derivative=0):
    r"""
    For a real order `\nu \ge 0` and a positive integer `m`, returns
    `j_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
    first kind `J_{\nu}(z)` (see :func:`~mpmath.besselj`). Alternatively,
    with *derivative=1*, gives the first nonnegative simple zero
    `j'_{\nu,m}` of `J'_{\nu}(z)`.

    The indexing convention is that used by Abramowitz & Stegun
    and the DLMF. Note the special case `j'_{0,1} = 0`, while all other
    zeros are positive. In effect, only simple zeros are counted
    (all zeros of Bessel functions are simple except possibly `z = 0`)
    and `j_{\nu,m}` becomes a monotonic function of both `\nu`
    and `m`.

    The zeros are interlaced according to the inequalities

    .. math ::

        j'_{\nu,k} < j_{\nu,k} < j'_{\nu,k+1}

        j_{\nu,1} < j_{\nu+1,1} < j_{\nu,2} < j_{\nu+1,2} < j_{\nu,3} < \cdots

    **Examples**

    Initial zeros of the Bessel functions `J_0(z), J_1(z), J_2(z)`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> besseljzero(0,1); besseljzero(0,2); besseljzero(0,3)
        2.404825557695772768621632
        5.520078110286310649596604
        8.653727912911012216954199
        >>> besseljzero(1,1); besseljzero(1,2); besseljzero(1,3)
        3.831705970207512315614436
        7.01558666981561875353705
        10.17346813506272207718571
        >>> besseljzero(2,1); besseljzero(2,2); besseljzero(2,3)
        5.135622301840682556301402
        8.417244140399864857783614
        11.61984117214905942709415

    Initial zeros of `J'_0(z), J'_1(z), J'_2(z)`::

        >>> besseljzero(0,1,1); besseljzero(0,2,1); besseljzero(0,3,1)
        0.0
        3.831705970207512315614436
        7.01558666981561875353705
        >>> besseljzero(1,1,1); besseljzero(1,2,1); besseljzero(1,3,1)
        1.84118378134065930264363
        5.331442773525032636884016
        8.536316366346285834358961
        >>> besseljzero(2,1,1); besseljzero(2,2,1); besseljzero(2,3,1)
        3.054236928227140322755932
        6.706133194158459146634394
        9.969467823087595793179143

    Zeros with large index::

        >>> besseljzero(0,100); besseljzero(0,1000); besseljzero(0,10000)
        313.3742660775278447196902
        3140.807295225078628895545
        31415.14114171350798533666
        >>> besseljzero(5,100); besseljzero(5,1000); besseljzero(5,10000)
        321.1893195676003157339222
        3148.657306813047523500494
        31422.9947255486291798943
        >>> besseljzero(0,100,1); besseljzero(0,1000,1); besseljzero(0,10000,1)
        311.8018681873704508125112
        3139.236339643802482833973
        31413.57032947022399485808

    Zeros of functions with large order::

        >>> besseljzero(50,1)
        57.11689916011917411936228
        >>> besseljzero(50,2)
        62.80769876483536093435393
        >>> besseljzero(50,100)
        388.6936600656058834640981
        >>> besseljzero(50,1,1)
        52.99764038731665010944037
        >>> besseljzero(50,2,1)
        60.02631933279942589882363
        >>> besseljzero(50,100,1)
        387.1083151608726181086283

    Zeros of functions with fractional order::

        >>> besseljzero(0.5,1); besseljzero(1.5,1); besseljzero(2.25,4)
        3.141592653589793238462643
        4.493409457909064175307881
        15.15657692957458622921634

    Both `J_{\nu}(z)` and `J'_{\nu}(z)` can be expressed as infinite
    products over their zeros::

        >>> v,z = 2, mpf(1)
        >>> (z/2)**v/gamma(v+1) * \
        ...     nprod(lambda k: 1-(z/besseljzero(v,k))**2, [1,inf])
        ...
        0.1149034849319004804696469
        >>> besselj(v,z)
        0.1149034849319004804696469
        >>> (z/2)**(v-1)/2/gamma(v) * \
        ...     nprod(lambda k: 1-(z/besseljzero(v,k,1))**2, [1,inf])
        ...
        0.2102436158811325550203884
        >>> besselj(v,z,1)
        0.2102436158811325550203884

    """
    # Unary plus rounds the result (computed at elevated precision
    # inside bessel_zero) to the current working precision.
    return +bessel_zero(ctx, 1, derivative, v, m)
1018
+
1019
@defun
def besselyzero(ctx, v, m, derivative=0):
    r"""
    For a real order `\nu \ge 0` and a positive integer `m`, returns
    `y_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
    second kind `Y_{\nu}(z)` (see :func:`~mpmath.bessely`). Alternatively,
    with *derivative=1*, gives the first positive zero `y'_{\nu,m}` of
    `Y'_{\nu}(z)`.

    The zeros are interlaced according to the inequalities

    .. math ::

        y_{\nu,k} < y'_{\nu,k} < y_{\nu,k+1}

        y_{\nu,1} < y_{\nu+1,1} < y_{\nu,2} < y_{\nu+1,2} < y_{\nu,3} < \cdots

    **Examples**

    Initial zeros of the Bessel functions `Y_0(z), Y_1(z), Y_2(z)`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> besselyzero(0,1); besselyzero(0,2); besselyzero(0,3)
        0.8935769662791675215848871
        3.957678419314857868375677
        7.086051060301772697623625
        >>> besselyzero(1,1); besselyzero(1,2); besselyzero(1,3)
        2.197141326031017035149034
        5.429681040794135132772005
        8.596005868331168926429606
        >>> besselyzero(2,1); besselyzero(2,2); besselyzero(2,3)
        3.384241767149593472701426
        6.793807513268267538291167
        10.02347797936003797850539

    Initial zeros of `Y'_0(z), Y'_1(z), Y'_2(z)`::

        >>> besselyzero(0,1,1); besselyzero(0,2,1); besselyzero(0,3,1)
        2.197141326031017035149034
        5.429681040794135132772005
        8.596005868331168926429606
        >>> besselyzero(1,1,1); besselyzero(1,2,1); besselyzero(1,3,1)
        3.683022856585177699898967
        6.941499953654175655751944
        10.12340465543661307978775
        >>> besselyzero(2,1,1); besselyzero(2,2,1); besselyzero(2,3,1)
        5.002582931446063945200176
        8.350724701413079526349714
        11.57419546521764654624265

    Zeros with large index::

        >>> besselyzero(0,100); besselyzero(0,1000); besselyzero(0,10000)
        311.8034717601871549333419
        3139.236498918198006794026
        31413.57034538691205229188
        >>> besselyzero(5,100); besselyzero(5,1000); besselyzero(5,10000)
        319.6183338562782156235062
        3147.086508524556404473186
        31421.42392920214673402828
        >>> besselyzero(0,100,1); besselyzero(0,1000,1); besselyzero(0,10000,1)
        313.3726705426359345050449
        3140.807136030340213610065
        31415.14112579761578220175

    Zeros of functions with large order::

        >>> besselyzero(50,1)
        53.50285882040036394680237
        >>> besselyzero(50,2)
        60.11244442774058114686022
        >>> besselyzero(50,100)
        387.1096509824943957706835
        >>> besselyzero(50,1,1)
        56.96290427516751320063605
        >>> besselyzero(50,2,1)
        62.74888166945933944036623
        >>> besselyzero(50,100,1)
        388.6923300548309258355475

    Zeros of functions with fractional order::

        >>> besselyzero(0.5,1); besselyzero(1.5,1); besselyzero(2.25,4)
        1.570796326794896619231322
        2.798386045783887136720249
        13.56721208770735123376018

    """
    # Unary plus rounds the result (computed at elevated precision
    # inside bessel_zero) to the current working precision.
    return +bessel_zero(ctx, 2, derivative, v, m)
lib/python3.11/site-packages/mpmath/functions/elliptic.py ADDED
@@ -0,0 +1,1431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""
2
+ Elliptic functions historically comprise the elliptic integrals
3
+ and their inverses, and originate from the problem of computing the
4
+ arc length of an ellipse. From a more modern point of view,
5
+ an elliptic function is defined as a doubly periodic function, i.e.
6
+ a function which satisfies
7
+
8
+ .. math ::
9
+
10
+ f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z)
11
+
12
+ for some half-periods `\omega_1, \omega_2` with
13
+ `\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic
14
+ functions are the Jacobi elliptic functions. More broadly, this section
15
+ includes quasi-doubly periodic functions (such as the Jacobi theta
16
+ functions) and other functions useful in the study of elliptic functions.
17
+
18
+ Many different conventions for the arguments of
19
+ elliptic functions are in use. It is even standard to use
20
+ different parameterizations for different functions in the same
21
+ text or software (and mpmath is no exception).
22
+ The usual parameters are the elliptic nome `q`, which usually
23
+ must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary
24
+ complex number); the elliptic modulus `k` (an arbitrary complex
25
+ number); and the half-period ratio `\tau`, which usually must
26
+ satisfy `\mathrm{Im}[\tau] > 0`.
27
+ These quantities can be expressed in terms of each other
28
+ using the following relations:
29
+
30
+ .. math ::
31
+
32
+ m = k^2
33
+
34
+ .. math ::
35
+
36
+ \tau = i \frac{K(1-m)}{K(m)}
37
+
38
+ .. math ::
39
+
40
+ q = e^{i \pi \tau}
41
+
42
+ .. math ::
43
+
44
+ k = \frac{\vartheta_2^2(q)}{\vartheta_3^2(q)}
45
+
46
+ In addition, an alternative definition is used for the nome in
47
+ number theory, which we here denote by q-bar:
48
+
49
+ .. math ::
50
+
51
+ \bar{q} = q^2 = e^{2 i \pi \tau}
52
+
53
+ For convenience, mpmath provides functions to convert
54
+ between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`,
55
+ :func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`).
56
+
57
+ **References**
58
+
59
+ 1. [AbramowitzStegun]_
60
+
61
+ 2. [WhittakerWatson]_
62
+
63
+ """
64
+
65
+ from .functions import defun, defun_wrapped
66
+
67
@defun_wrapped
def eta(ctx, tau):
    r"""
    Returns the Dedekind eta function of tau in the upper half-plane.

    Raises ``ValueError`` if Im(tau) <= 0.

    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> eta(1j); gamma(0.25) / (2*pi**0.75)
    (0.7682254223260566590025942 + 0.0j)
    0.7682254223260566590025942
    >>> tau = sqrt(2) + sqrt(5)*1j
    >>> eta(-1/tau); sqrt(-1j*tau) * eta(tau)
    (0.9022859908439376463573294 + 0.07985093673948098408048575j)
    (0.9022859908439376463573295 + 0.07985093673948098408048575j)
    >>> eta(tau+1); exp(pi*1j/12) * eta(tau)
    (0.4493066139717553786223114 + 0.3290014793877986663915939j)
    (0.4493066139717553786223114 + 0.3290014793877986663915939j)
    >>> f = lambda z: diff(eta, z) / eta(z)
    >>> chop(36*diff(f,tau)**2 - 24*diff(f,tau,2)*f(tau) + diff(f,tau,3))
    0.0

    """
    if ctx.im(tau) <= 0.0:
        raise ValueError("eta is only defined in the upper half-plane")
    # eta(tau) = q^(1/24) * prod(1 - q^n), q = exp(2*pi*i*tau):
    # here q holds q^(1/24) and q**24 recovers q for the Euler function qp.
    q = ctx.expjpi(tau/12)
    return q * ctx.qp(q**24)
93
+
94
def nome(ctx, m):
    """
    Elliptic nome q computed from the parameter m as
    exp(-pi*K(1-m)/K(m)), with special-cased values for
    m in {0, 1, nan, +-inf}.
    """
    m = ctx.convert(m)
    # q(0) = 0 and q(1) = 1 (limit); propagate nan.
    if not m:
        return m
    if m == ctx.one:
        return m
    if ctx.isnan(m):
        return m
    if ctx.isinf(m):
        if m == ctx.ninf:
            # Limit value -1, kept in m's type.
            return type(m)(-1)
        else:
            return ctx.mpc(-1)
    a = ctx.ellipk(ctx.one-m)
    b = ctx.ellipk(m)
    v = ctx.exp(-ctx.pi*a/b)
    if not ctx._im(m) and ctx._re(m) < 1:
        # Real m < 1 gives a real nome; strip rounding noise from the
        # imaginary part (preserving complex type if m was complex).
        if ctx._is_real_type(m):
            return v.real
        else:
            return v.real + 0j
    elif m == 2:
        # NOTE(review): only m == 2 is forced purely imaginary here,
        # although other real m > 1 look analogous — confirm upstream
        # rationale before generalizing.
        v = ctx.mpc(0, v.imag)
    return v
118
+
119
@defun_wrapped
def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> qfrom(q=0.25)
        0.25
        >>> qfrom(m=mfrom(q=0.25))
        0.25
        >>> qfrom(k=kfrom(q=0.25))
        0.25
        >>> qfrom(tau=taufrom(q=0.25))
        (0.25 + 0.0j)
        >>> qfrom(qbar=qbarfrom(q=0.25))
        0.25

    """
    # Parameters are checked in priority order q, m, k, tau, qbar;
    # if none is supplied the function falls through and returns None.
    if q is not None:
        return ctx.convert(q)
    if m is not None:
        return nome(ctx, m)
    if k is not None:
        # m = k^2
        return nome(ctx, ctx.convert(k)**2)
    if tau is not None:
        # q = exp(i*pi*tau)
        return ctx.expjpi(tau)
    if qbar is not None:
        # qbar = q^2
        return ctx.sqrt(qbar)
149
@defun_wrapped
def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the number-theoretic nome `\bar q`, given any of
    `q, m, k, \tau, \bar{q}`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> qbarfrom(qbar=0.25)
        0.25
        >>> qbarfrom(q=qfrom(qbar=0.25))
        0.25
        >>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned
        0.25
        >>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned
        0.25
        >>> qbarfrom(tau=taufrom(qbar=0.25))
        (0.25 + 0.0j)

    """
    # qbar = q^2 = exp(2*pi*i*tau); parameters checked in priority
    # order, returning None if no parameter is supplied.
    if qbar is not None:
        return ctx.convert(qbar)
    if q is not None:
        return ctx.convert(q) ** 2
    if m is not None:
        return nome(ctx, m) ** 2
    if k is not None:
        return nome(ctx, ctx.convert(k)**2) ** 2
    if tau is not None:
        return ctx.expjpi(2*tau)
179
+
180
@defun_wrapped
def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the elliptic half-period ratio `\tau`, given any of
    `q, m, k, \tau, \bar{q}`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> taufrom(tau=0.5j)
        (0.0 + 0.5j)
        >>> taufrom(q=qfrom(tau=0.5j))
        (0.0 + 0.5j)
        >>> taufrom(m=mfrom(tau=0.5j))
        (0.0 + 0.5j)
        >>> taufrom(k=kfrom(tau=0.5j))
        (0.0 + 0.5j)
        >>> taufrom(qbar=qbarfrom(tau=0.5j))
        (0.0 + 0.5j)

    """
    # tau = i*K(1-m)/K(m); parameters checked in priority order,
    # returning None if no parameter is supplied.
    if tau is not None:
        return ctx.convert(tau)
    if m is not None:
        m = ctx.convert(m)
        return ctx.j*ctx.ellipk(1-m)/ctx.ellipk(m)
    if k is not None:
        k = ctx.convert(k)
        return ctx.j*ctx.ellipk(1-k**2)/ctx.ellipk(k**2)
    if q is not None:
        # Inverse of q = exp(i*pi*tau).
        return ctx.log(q) / (ctx.pi*ctx.j)
    if qbar is not None:
        qbar = ctx.convert(qbar)
        return ctx.log(qbar) / (2*ctx.pi*ctx.j)
213
+
214
@defun_wrapped
def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the elliptic modulus `k`, computed from whichever one of
    the parameters `q, m, k, \tau, \bar{q}` is supplied.

    As `q \to 1` and `q \to -1`, `k` rapidly approaches `1` and
    `i\infty` respectively; those limits are handled exactly.
    """
    if k is not None:
        return ctx.convert(k)
    if m is not None:
        return ctx.sqrt(m)
    # Reduce the remaining parametrizations to the nome q
    if tau is not None:
        q = ctx.expjpi(tau)
    if qbar is not None:
        q = ctx.sqrt(qbar)
    # Degenerate nomes on the unit circle
    if q == 1:
        return q
    if q == -1:
        return ctx.mpc(0,'inf')
    # k = theta_2(q)**2 / theta_3(q)**2
    theta_ratio = ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q)
    return theta_ratio**2
258
+
259
@defun_wrapped
def mfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the elliptic parameter `m`, computed from whichever one of
    the parameters `q, m, k, \tau, \bar{q}` is supplied.

    As `q \to 1` and `q \to -1`, `m` rapidly approaches `1` and
    `-\infty` respectively; those limits are handled exactly.
    """
    if m is not None:
        return m
    if k is not None:
        return k**2
    # Reduce the remaining parametrizations to the nome q
    if tau is not None:
        q = ctx.expjpi(tau)
    if qbar is not None:
        q = ctx.sqrt(qbar)
    # Degenerate nomes on the unit circle
    if q == 1:
        return ctx.convert(q)
    if q == -1:
        return q*ctx.inf
    # m = theta_2(q)**4 / theta_3(q)**4
    result = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4
    # For real negative q the value is real; drop the spurious
    # imaginary part produced by the complex theta evaluation
    if ctx._is_real_type(q) and q < 0:
        result = result.real
    return result
313
+
314
# Table driving ellipfun(): for each two-letter Jacobi function name,
# a tuple of
#   (theta-constant factors in the numerator,
#    theta-constant factors in the denominator,
#    theta-function factors in the numerator,
#    theta-function factors in the denominator,
#    elementary limit function at m = 0 (trigonometric),
#    elementary limit function at m = 1 (hyperbolic)),
# where '1' denotes the constant function 1. The trivial quotients
# ('cc', 'ss', 'nn', 'dd') map to None and are identically 1.
jacobi_spec = {
    'sn': ([3],   [2],   [1], [4], 'sin', 'tanh'),
    'cn': ([4],   [2],   [2], [4], 'cos', 'sech'),
    'dn': ([4],   [3],   [3], [4], '1',   'sech'),
    'ns': ([2],   [3],   [4], [1], 'csc', 'coth'),
    'nc': ([2],   [4],   [4], [2], 'sec', 'cosh'),
    'nd': ([3],   [4],   [4], [3], '1',   'cosh'),
    'sc': ([3],   [4],   [1], [2], 'tan', 'sinh'),
    'sd': ([3,3], [2,4], [1], [3], 'sin', 'sinh'),
    'cd': ([3],   [2],   [2], [3], 'cos', '1'),
    'cs': ([4],   [3],   [2], [1], 'cot', 'csch'),
    'dc': ([2],   [3],   [3], [2], 'sec', '1'),
    'ds': ([2,4], [3,3], [3], [1], 'csc', 'csch'),
    'cc': None,
    'ss': None,
    'nn': None,
    'dd': None,
}
332
+
333
@defun
def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None):
    """
    Evaluates the Jacobi elliptic function named by *kind* (a
    two-character string such as 'sn' or 'cd') at the argument *u*,
    with the elliptic parameter given as any one of m, q, k or tau.

    If *u* is omitted, returns a callable evaluating the chosen
    function, so that e.g. ellipfun('sn') behaves like a named
    function sn(u, ...).
    """
    try:
        spec = jacobi_spec[kind]
    except KeyError:
        raise ValueError("First argument must be a two-character string "
            "containing 's', 'c', 'd' or 'n', e.g.: 'sn'")
    if u is None:
        # Partial application: return a function of the remaining args
        def f(*args, **kwargs):
            return ctx.ellipfun(kind, *args, **kwargs)
        f.__name__ = kind
        return f
    prec = ctx.prec
    try:
        ctx.prec += 10
        u = ctx.convert(u)
        q = ctx.qfrom(m=m, q=q, k=k, tau=tau)
        if spec is None:
            # Trivial quotients such as 'cc' are identically one;
            # multiplying by 0*q*u propagates nan/type information
            v = ctx.one + 0*q*u
        elif q == ctx.zero:
            # m = 0: degenerates to a trigonometric function of u
            v = ctx.one if spec[4] == '1' else getattr(ctx, spec[4])(u)
            v += 0*q*u
        elif q == ctx.one:
            # m = 1: degenerates to a hyperbolic function of u
            v = ctx.one if spec[5] == '1' else getattr(ctx, spec[5])(u)
            v += 0*q*u
        else:
            # General case: quotient of Jacobi theta functions
            t = u / ctx.jtheta(3, 0, q)**2
            v = ctx.one
            for a in spec[0]:
                v *= ctx.jtheta(a, 0, q)
            for b in spec[1]:
                v /= ctx.jtheta(b, 0, q)
            for c in spec[2]:
                v *= ctx.jtheta(c, t, q)
            for d in spec[3]:
                v /= ctx.jtheta(d, t, q)
    finally:
        ctx.prec = prec
    return +v
370
+
371
@defun_wrapped
def kleinj(ctx, tau=None, **kwargs):
    r"""
    Evaluates the Klein j-invariant, a modular function defined for
    `\tau` in the upper half-plane as

    .. math ::

        J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)}

    where `g_2, g_3` are the modular invariants of the Weierstrass
    elliptic function. The alternative j-function notation is
    `j(\tau) = 1728 J(\tau)`. It satisfies the functional equations
    `J(\tau) = J(\tau+1) = J(-1/\tau)`.

    Instead of `\tau`, the parameter may be specified via any of the
    keyword forms accepted by :func:`~mpmath.qfrom` (``q``, ``m``,
    ``k``, ``qbar``).
    """
    # Express J via Jacobi theta constants of the nome q:
    # J = (t2^8 + t3^8 + t4^8)^3 / (54 (t2 t3 t4)^8)
    nome_q = ctx.qfrom(tau=tau, **kwargs)
    t2, t3, t4 = (ctx.jtheta(n, 0, nome_q) for n in (2, 3, 4))
    numerator = (t2**8 + t3**8 + t4**8)**3
    denominator = 54*(t2*t3*t4)**8
    return numerator/denominator
464
+
465
+
466
def RF_calc(ctx, x, y, z, r):
    # Compute the Carlson symmetric elliptic integral R_F(x,y,z) to
    # tolerance r, using Carlson's duplication algorithm followed by a
    # truncated Taylor series at the common limit point.
    #
    # Cases with two equal arguments reduce to the degenerate R_C.
    if y == z: return RC_calc(ctx, x, y, r)
    if x == z: return RC_calc(ctx, y, x, r)
    if x == y: return RC_calc(ctx, z, x, r)
    # Special values: propagate nan; any infinite argument gives zero
    # (the integrand decays like t**(-3/2) and the 1/sqrt(inf) factor
    # kills the integral).
    if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)):
        if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z):
            return x*y*z
        if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z):
            return ctx.zero
    xm,ym,zm = x,y,z
    # A0: initial arithmetic mean of the arguments; Q sets the
    # termination threshold so the series truncation error is O(r).
    A0 = Am = (x+y+z)/3
    Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z))
    g = ctx.mpf(0.25)
    pow4 = ctx.one  # tracks 4**(-iteration count)
    # Duplication loop: each step replaces the arguments with averages
    # involving the geometric-mean term lm, driving them toward a
    # common limit while pow4 shrinks the residual spread.
    while 1:
        xs = ctx.sqrt(xm)
        ys = ctx.sqrt(ym)
        zs = ctx.sqrt(zm)
        lm = xs*ys + xs*zs + ys*zs
        Am1 = (Am+lm)*g
        xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g
        if pow4 * Q < abs(Am):
            break
        Am = Am1
        pow4 *= g
    # Evaluate the degree-5 Taylor expansion of R_F in the elementary
    # symmetric variables E2, E3 of the scaled differences X, Y, Z
    # (note Z = -X-Y since the three differences sum to zero).
    t = pow4/Am
    X = (A0-x)*t
    Y = (A0-y)*t
    Z = -X-Y
    E2 = X*Y-Z**2
    E3 = X*Y*Z
    return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240
498
+
499
def RC_calc(ctx, x, y, r, pv=True):
    # Compute the degenerate Carlson integral R_C(x,y) = R_F(x,y,y)
    # in closed form using inverse circular/hyperbolic functions.
    # With pv=True, the Cauchy principal value is returned when y is
    # real and negative.
    if not (ctx.isnormal(x) and ctx.isnormal(y)):
        # Special values
        if ctx.isinf(x) or ctx.isinf(y):
            return 1/(x*y)
        if y == 0:
            return ctx.inf
        if x == 0:
            return ctx.pi / ctx.sqrt(y) / 2
        raise ValueError
    # Cauchy principal value
    if pv and ctx._im(y) == 0 and ctx._re(y) < 0:
        return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r)
    if x == y:
        return 1/ctx.sqrt(x)
    # Guard digits against cancellation when x is very close to y
    extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x))
    ctx.prec += extraprec
    if ctx._is_real_type(x) and ctx._is_real_type(y):
        x = ctx._re(x)
        y = ctx._re(y)
        a = ctx.sqrt(x/y)
        if x < y:
            # Circular case: R_C = acos(sqrt(x/y)) / sqrt(y-x)
            b = ctx.sqrt(y-x)
            v = ctx.acos(a)/b
        else:
            # Hyperbolic case: R_C = acosh(sqrt(x/y)) / sqrt(x-y)
            b = ctx.sqrt(x-y)
            v = ctx.acosh(a)/b
    else:
        # Generic complex arguments: single acos formula
        sx = ctx.sqrt(x)
        sy = ctx.sqrt(y)
        v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy)
    ctx.prec -= extraprec
    return v
531
+
532
def RJ_calc(ctx, x, y, z, p, r, integration):
    """
    Compute the Carlson symmetric elliptic integral of the third kind
    R_J(x,y,z,p) to tolerance r.

    With integration == 0, computes RJ only using Carlson's algorithm
    (may be wrong for some values).
    With integration == 1, uses an initial integration to make sure
    Carlson's algorithm is correct.
    With integration == 2, uses only integration.
    """
    # Special values: propagate nan; infinities give zero; a zero p or
    # more than one zero among x,y,z makes the integral divergent.
    if not (ctx.isnormal(x) and ctx.isnormal(y) and \
        ctx.isnormal(z) and ctx.isnormal(p)):
        if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p):
            return x*y*z
        if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p):
            return ctx.zero
        if not p:
            return ctx.inf
        if (not x) + (not y) + (not z) > 1:
            return ctx.inf
    # Check conditions and fall back on integration for argument
    # reduction if needed. The following conditions might be needlessly
    # restrictive.
    initial_integral = ctx.zero
    if integration >= 1:
        # Carlson's algorithm is known valid for nonnegative real parts
        # with p in the right half-plane, for p equal to one of x,y,z,
        # and for certain conjugate-pair configurations.
        ok = (x.real >= 0 and y.real >= 0 and z.real >= 0 and p.real > 0)
        if not ok:
            if x == p or y == p or z == p:
                ok = True
        if not ok:
            if p.imag != 0 or p.real >= 0:
                if (x.imag == 0 and x.real >= 0 and ctx.conj(y) == z):
                    ok = True
                if (y.imag == 0 and y.real >= 0 and ctx.conj(x) == z):
                    ok = True
                if (z.imag == 0 and z.real >= 0 and ctx.conj(x) == y):
                    ok = True
        if not ok or (integration == 2):
            # Shift all arguments by N so they land in the valid region,
            # and account for the integral over [0, N] numerically.
            N = ctx.ceil(-min(x.real, y.real, z.real, p.real)) + 1
            # Integrate around any singularities
            if all((t.imag >= 0 or t.real > 0) for t in [x, y, z, p]):
                margin = ctx.j
            elif all((t.imag < 0 or t.real > 0) for t in [x, y, z, p]):
                margin = -ctx.j
            else:
                margin = 1
                # Go through the upper half-plane, but low enough that any
                # parameter starting in the lower plane doesn't cross the
                # branch cut
                for t in [x, y, z, p]:
                    if t.imag >= 0 or t.real > 0:
                        continue
                    margin = min(margin, abs(t.imag) * 0.5)
                margin *= ctx.j
            N += margin
            F = lambda t: 1/(ctx.sqrt(t+x)*ctx.sqrt(t+y)*ctx.sqrt(t+z)*(t+p))
            if integration == 2:
                return 1.5 * ctx.quadsubdiv(F, [0, N, ctx.inf])
            initial_integral = 1.5 * ctx.quadsubdiv(F, [0, N])
            x += N; y += N; z += N; p += N
    # Carlson's duplication algorithm for R_J
    xm,ym,zm,pm = x,y,z,p
    A0 = Am = (x + y + z + 2*p)/5
    delta = (p-x)*(p-y)*(p-z)
    # Q controls termination so the series truncation error is O(r)
    Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p))
    g = ctx.mpf(0.25)
    pow4 = ctx.one  # tracks 4**(-iteration count)
    S = 0           # accumulated R_C correction terms
    while 1:
        sx = ctx.sqrt(xm)
        sy = ctx.sqrt(ym)
        sz = ctx.sqrt(zm)
        sp = ctx.sqrt(pm)
        lm = sx*sy + sx*sz + sy*sz
        Am1 = (Am+lm)*g
        xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g
        dm = (sp+sx) * (sp+sy) * (sp+sz)
        em = delta * pow4**3 / dm**2
        if pow4 * Q < abs(Am):
            break
        # Each duplication step contributes an R_C term to the sum
        T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm
        S += T
        pow4 *= g
        Am = Am1
    # Degree-5 Taylor expansion of R_J at the common limit point, in
    # the elementary symmetric variables E2..E5 of X, Y, Z, P.
    t = pow4 / Am
    X = (A0-x)*t
    Y = (A0-y)*t
    Z = (A0-z)*t
    P = (-X-Y-Z)/2
    E2 = X*Y + X*Z + Y*Z - 3*P**2
    E3 = X*Y*Z + 2*E2*P + 4*P**3
    E4 = (2*X*Y*Z + E2*P + 3*P**3)*P
    E5 = X*Y*Z*P**2
    P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5
    Q = 24024
    v1 = pow4 * ctx.power(Am, -1.5) * P/Q
    v2 = 6*S
    return initial_integral + v1 + v2
627
+
628
@defun
def elliprf(ctx, x, y, z):
    r"""
    Evaluates the Carlson symmetric elliptic integral of the first kind

    .. math ::

        R_F(x,y,z) = \frac{1}{2}
        \int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}}

    which is defined for `x,y,z \notin (-\infty,0)`, with at most one
    of `x,y,z` equal to zero. For real `x,y,z \ge 0` the principal
    square root is taken in the integrand; for complex arguments the
    branch is chosen to keep the integrand continuous along the path
    of integration.

    **References**

    1. [Carlson]_
    2. [DLMF]_ Chapter 19. Elliptic Integrals
    """
    x, y, z = ctx.convert(x), ctx.convert(y), ctx.convert(z)
    saved_prec = ctx.prec
    try:
        # Work with guard digits; the core routine gets a tolerance a
        # little above eps at the elevated precision.
        ctx.prec += 20
        err = ctx.eps * 2**10
        result = RF_calc(ctx, x, y, z, err)
    finally:
        ctx.prec = saved_prec
    return +result
742
+
743
@defun
def elliprc(ctx, x, y, pv=True):
    r"""
    Evaluates the degenerate Carlson symmetric elliptic integral
    of the first kind

    .. math ::

        R_C(x,y) = R_F(x,y,y) =
        \frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}.

    If `y \in (-\infty,0)`, either a value defined by continuity
    (*pv=False*) or the Cauchy principal value (*pv=True*, the
    default) is computed. For `x \ge 0, y > 0` the value reduces to
    elementary inverse circular/hyperbolic functions.
    """
    x, y = ctx.convert(x), ctx.convert(y)
    saved_prec = ctx.prec
    try:
        # Work with guard digits; the core routine gets a tolerance a
        # little above eps at the elevated precision.
        ctx.prec += 20
        err = ctx.eps * 2**10
        result = RC_calc(ctx, x, y, err, pv)
    finally:
        ctx.prec = saved_prec
    return +result
820
+
821
@defun
def elliprj(ctx, x, y, z, p, integration=1):
    r"""
    Evaluates the Carlson symmetric elliptic integral of the third kind

    .. math ::

        R_J(x,y,z,p) = \frac{3}{2}
        \int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}.

    Like :func:`~mpmath.elliprf`, the branch of the square root in the
    integrand is chosen so as to be continuous along the path of
    integration for complex arguments.

    The *integration* flag selects how much numerical integration is
    used to supplement Carlson's algorithm outside its proven region
    of validity (0: none, 1: as needed, 2: integration only).
    """
    x, y, z, p = (ctx.convert(t) for t in (x, y, z, p))
    saved_prec = ctx.prec
    try:
        # Work with guard digits; the core routine gets a tolerance a
        # little above eps at the elevated precision.
        ctx.prec += 20
        err = ctx.eps * 2**10
        result = RJ_calc(ctx, x, y, z, p, err, integration)
    finally:
        ctx.prec = saved_prec
    return +result
897
+
898
@defun
def elliprd(ctx, x, y, z):
    r"""
    Evaluates the degenerate Carlson symmetric elliptic integral
    of the third kind, also known as the Carlson elliptic integral
    of the second kind, `R_D(x,y,z) = R_J(x,y,z,z)`.

    See :func:`~mpmath.elliprj` for additional information.
    """
    # R_D is R_J with the fourth argument repeating the third
    return ctx.elliprj(x, y, z, z)
927
+
928
@defun
def elliprg(ctx, x, y, z):
    r"""
    Evaluates the Carlson completely symmetric elliptic integral
    of the second kind

    .. math ::

        R_G(x,y,z) = \frac{1}{4} \int_0^{\infty}
        \frac{t}{\sqrt{(t+x)(t+y)(t+z)}}
        \left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt.
    """
    x = ctx.convert(x)
    y = ctx.convert(y)
    z = ctx.convert(z)
    num_zero = (not x) + (not y) + (not z)
    if num_zero == 3:
        # R_G(0,0,0) = 0, typed consistently with the inputs
        return (x+y+z)*0
    if num_zero == 2:
        # Exactly one nonzero argument t: R_G(t,0,0) = sqrt(t)/2
        return 0.5*ctx.sqrt(x or y or z)
    if num_zero == 1 and not z:
        # Keep any zero argument out of the z slot (z appears in a
        # denominator below)
        x, z = z, x
    def parts():
        # Standard reduction of R_G to R_F and R_D
        t1 = 0.5*z*ctx.elliprf(x,y,z)
        t2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3
        t3 = 0.5*ctx.sqrt(x)*ctx.sqrt(y)/ctx.sqrt(z)
        return t1, t2, t3
    # The three terms may cancel; sum them with adaptive precision
    return ctx.sum_accurately(parts)
987
+
988
+
989
@defun_wrapped
def ellipf(ctx, phi, m):
    r"""
    Evaluates the Legendre incomplete elliptic integral of the first kind

    .. math ::

        F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}}.

    Reduces to the complete integral `K(m)` at `\phi = \pi/2`. Outside
    `-\pi/2 \le \Re(\phi) \le \pi/2` the function extends
    quasi-periodically as

    .. math ::

        F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}.

    The principal branch of the square root is taken in the defining
    integral and the path of integration avoids crossing branch cuts.
    """
    w = phi
    # Special values involving zeros, infinities and nans
    if not (ctx.isnormal(w) and ctx.isnormal(m)):
        if m == 0:
            return w + m
        if w == 0:
            return w * m
        if m == ctx.inf or m == ctx.ninf:
            return w/m
        raise ValueError
    xr = w.real
    # Extra precision for accurate quasi-period reduction of large arguments
    ctx.prec += max(0, ctx.mag(xr))
    pi = +ctx.pi
    outside = abs(xr) > pi/2
    # m = 1 diverges once a quarter period is crossed
    if m == 1 and outside:
        return ctx.inf
    if outside:
        # Reduce into the fundamental strip and add the periodic part
        n = ctx.nint(xr/pi)
        w = w - pi*n
        shift = 2*n*ctx.ellipk(m)
    else:
        shift = 0
    c, s = ctx.cos_sin(w)
    # Express F via the Carlson integral R_F
    return s * ctx.elliprf(c**2, 1-m*s**2, 1) + shift
1105
+
1106
@defun_wrapped
def ellipe(ctx, *args):
    r"""
    With a single argument `m`, evaluates the Legendre complete
    elliptic integral of the second kind `E(m)`. With two arguments
    `\phi, m`, evaluates the incomplete integral

    .. math ::

        E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt,

    which reduces to the complete integral at `\phi = \pi/2`. Outside
    `-\pi/2 \le \Re(\phi) \le \pi/2` the function extends
    quasi-periodically as

    .. math ::

        E(\phi + n \pi, m) = 2 n E(m) + E(\phi,m), n \in \mathbb{Z}.

    The principal branch of the square root is taken in the defining
    integral and the path of integration avoids crossing branch cuts.
    """
    if len(args) == 1:
        # Complete integral E(m)
        return ctx._ellipe(args[0])
    phi, m = args
    w = phi
    # Special values involving zeros, infinities and nans
    if not (ctx.isnormal(w) and ctx.isnormal(m)):
        if m == 0:
            return w + m
        if w == 0:
            return w * m
        if m == ctx.inf or m == ctx.ninf:
            return ctx.inf
        raise ValueError
    xr = w.real
    # Extra precision for accurate quasi-period reduction of large arguments
    ctx.prec += max(0, ctx.mag(xr))
    pi = +ctx.pi
    if abs(xr) > pi/2:
        # Reduce into the fundamental strip and add the periodic part
        n = ctx.nint(xr/pi)
        w = w - pi*n
        shift = 2*n*ctx.ellipe(m)
    else:
        shift = 0
    def terms():
        # Express E via the Carlson integrals R_F and R_D; the two
        # pieces may cancel, so sum them with adaptive precision
        c, s = ctx.cos_sin(w)
        x = c**2
        y = 1 - m*s**2
        RF = ctx.elliprf(x, y, 1)
        RD = ctx.elliprd(x, y, 1)
        return s*RF, -m*s**3*RD/3
    return ctx.sum_accurately(terms) + shift
1273
+
1274
@defun_wrapped
def ellippi(ctx, *args):
    r"""
    Called with three arguments `n, \phi, m`, evaluates the Legendre
    incomplete elliptic integral of the third kind

    .. math ::

        \Pi(n; \phi, m) = \int_0^{\phi}
        \frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} =
        \int_0^{\sin \phi}
        \frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}.

    Called with two arguments `n, m`, evaluates the complete
    elliptic integral of the third kind
    `\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`.

    In the defining integral, it is assumed that the principal branch
    of the square root is taken and that the path of integration avoids
    crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
    the function extends quasi-periodically as

    .. math ::

        \Pi(n,\phi+k\pi,m) = 2k\Pi(n,m) + \Pi(n,\phi,m), k \in \mathbb{Z}.

    **Plots**

    .. literalinclude :: /plots/ellippi.py
    .. image :: /plots/ellippi.png

    **Examples for the complete integral**

    Some basic values and limits::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> ellippi(0,-5); ellipk(-5)
        0.9555039270640439337379334
        0.9555039270640439337379334
        >>> ellippi(inf,2)
        0.0
        >>> ellippi(2,inf)
        0.0
        >>> abs(ellippi(1,5))
        +inf
        >>> abs(ellippi(0.25,1))
        +inf

    Evaluation in terms of simpler functions::

        >>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25)
        1.956616279119236207279727
        1.956616279119236207279727
        >>> ellippi(3,0); pi/(2*sqrt(-2))
        (0.0 - 1.11072073453959156175397j)
        (0.0 - 1.11072073453959156175397j)
        >>> ellippi(-3,0); pi/(2*sqrt(4))
        0.7853981633974483096156609
        0.7853981633974483096156609

    **Examples for the incomplete integral**

    Basic values and limits::

        >>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5)
        1.622944760954741603710555
        1.622944760954741603710555
        >>> ellippi(1,0,1)
        0.0
        >>> ellippi(inf,0,1)
        0.0
        >>> ellippi(0,0.25,0.5); ellipf(0.25,0.5)
        0.2513040086544925794134591
        0.2513040086544925794134591
        >>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2
        2.054332933256248668692452
        2.054332933256248668692452
        >>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75)
        135.240868757890840755058
        135.240868757890840755058
        >>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3)
        0.9190227391656969903987269
        0.9190227391656969903987269

    Complex arguments are supported::

        >>> ellippi(0.5, 5+6j-2*pi, -7-8j)
        (-0.3612856620076747660410167 + 0.5217735339984807829755815j)

    Some degenerate cases::

        >>> ellippi(1,1)
        +inf
        >>> ellippi(1,0)
        +inf
        >>> ellippi(1,2,0)
        +inf
        >>> ellippi(1,2,1)
        +inf
        >>> ellippi(1,0,1)
        0.0

    """
    # Accept either (n, m) for the complete integral or (n, phi, m)
    # for the incomplete one.
    if len(args) == 2:
        n, m = args
        complete = True
        z = phi = ctx.pi/2
    else:
        n, phi, m = args
        complete = False
        z = phi
    # Dispatch non-finite / degenerate arguments before the main algorithm.
    if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)):
        if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m):
            raise ValueError
        if complete:
            if m == 0:
                if n == 1:
                    return ctx.inf
                return ctx.pi/(2*ctx.sqrt(1-n))
            if n == 0: return ctx.ellipk(m)
            if ctx.isinf(n) or ctx.isinf(m): return ctx.zero
        else:
            if z == 0: return z
            if ctx.isinf(n): return ctx.zero
            if ctx.isinf(m): return ctx.zero
        if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m):
            raise ValueError
    if complete:
        if m == 1:
            if n == 1:
                return ctx.inf
            return -ctx.inf/ctx.sign(n-1)
        away = False
    else:
        x = z.real
        # Extra precision to absorb the magnitude of Re(z)
        ctx.prec += max(0, ctx.mag(x))
        pi = +ctx.pi
        away = abs(x) > pi/2
    # Quasi-periodic reduction to |Re(z)| <= pi/2 (see docstring formula).
    if away:
        d = ctx.nint(x/pi)
        z = z-pi*d
        P = 2*d*ctx.ellippi(n,m)
        if ctx.isinf(P):
            return ctx.inf
    else:
        P = 0
    # Evaluate via Carlson symmetric forms: Pi = s*RF + n*s^3*RJ/3,
    # summed accurately to limit cancellation between the two terms.
    def terms():
        if complete:
            c, s = ctx.zero, ctx.one
        else:
            c, s = ctx.cos_sin(z)
        x = c**2
        y = 1-m*s**2
        RF = ctx.elliprf(x, y, 1)
        RJ = ctx.elliprj(x, y, 1, 1-n*s**2)
        return s*RF, n*s**3*RJ/3
    return ctx.sum_accurately(terms) + P
lib/python3.11/site-packages/mpmath/functions/expintegrals.py ADDED
@@ -0,0 +1,425 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .functions import defun, defun_wrapped
2
+
3
@defun_wrapped
def _erf_complex(ctx, z):
    """Error function for general complex z via the confluent
    hypergeometric series erf(z) = 2/sqrt(pi)*z*1F1(1/2;3/2;-z^2)."""
    neg_sq = ctx.square_exp_arg(z, -1)
    series = ctx.hyp1f1((1,2), (3,2), neg_sq)
    result = (2/ctx.sqrt(ctx.pi))*z * series
    if not ctx._re(z):
        # Pure imaginary input: keep the result exactly on the imaginary axis
        result = ctx._im(result)*ctx.j
    return result
11
+
12
@defun_wrapped
def _erfc_complex(ctx, z):
    """Complementary error function for general complex z."""
    if ctx.re(z) > 2:
        # Right half-plane, away from the origin:
        # erfc(z) = exp(-z^2)/sqrt(pi) * U(1/2, 1/2, z^2)
        sq = ctx.square_exp_arg(z)
        neg_sq = ctx.fneg(sq, exact=True)
        res = ctx.exp(neg_sq)/ctx.sqrt(ctx.pi) * ctx.hyperu((1,2),(1,2), sq)
    else:
        res = 1 - ctx._erf_complex(z)
    if not ctx._re(z):
        # Pure imaginary input: result lies exactly on the line 1 + i*R
        res = 1+ctx._im(res)*ctx.j
    return res
23
+
24
@defun
def erf(ctx, z):
    """Error function erf(z); uses the context's fast real-line
    routine when possible, otherwise the generic complex path."""
    z = ctx.convert(z)
    real_input = ctx._is_real_type(z)
    axis_input = ctx._is_complex_type(z) and not z.imag
    if real_input or axis_input:
        try:
            if real_input:
                return ctx._erf(z)
            # Complex value on the real axis: evaluate on the real line,
            # then restore the complex type of the argument
            return type(z)(ctx._erf(z.real))
        except NotImplementedError:
            pass
    return ctx._erf_complex(z)
38
+
39
@defun
def erfc(ctx, z):
    """Complementary error function erfc(z) = 1 - erf(z); uses the
    context's fast real-line routine when possible."""
    z = ctx.convert(z)
    real_input = ctx._is_real_type(z)
    axis_input = ctx._is_complex_type(z) and not z.imag
    if real_input or axis_input:
        try:
            if real_input:
                return ctx._erfc(z)
            # Complex value on the real axis: evaluate on the real line,
            # then restore the complex type of the argument
            return type(z)(ctx._erfc(z.real))
        except NotImplementedError:
            pass
    return ctx._erfc_complex(z)
53
+
54
@defun
def square_exp_arg(ctx, z, mult=1, reciprocal=False):
    """Compute mult*z**2 (or mult/z**2 when reciprocal=True) at a
    boosted working precision, for use as an exponential argument."""
    wp = ctx.prec*4+20
    sq = ctx.fmul(z, z, prec=wp)
    if reciprocal:
        sq = ctx.fdiv(ctx.one, sq, prec=wp)
    if mult != 1:
        # mult is typically +/-1 or a small integer, so this is exact
        sq = ctx.fmul(sq, mult, exact=True)
    return sq
65
+
66
@defun_wrapped
def erfi(ctx, z):
    """Imaginary error function erfi(z) = -i*erf(i*z), via the
    series 2/sqrt(pi)*z*1F1(1/2;3/2;z^2)."""
    if not z:
        return z
    sq = ctx.square_exp_arg(z)
    res = (2/ctx.sqrt(ctx.pi)*z) * ctx.hyp1f1((1,2), (3,2), sq)
    if not ctx._re(z):
        # Pure imaginary input gives a pure imaginary result
        res = ctx._im(res)*ctx.j
    return res
75
+
76
@defun_wrapped
def erfinv(ctx, x):
    """Inverse error function on the real interval [-1, 1]."""
    xre = ctx._re(x)
    if (xre != x) or (xre < -1) or (xre > 1):
        return ctx.bad_domain("erfinv(x) is defined only for -1 <= x <= 1")
    x = xre
    # Exact special values: zero and the endpoints
    if not x:
        return x
    if x == 1:
        return ctx.inf
    if x == -1:
        return ctx.ninf
    # Build an initial approximation, then polish with a root finder
    if abs(x) < 0.9:
        guess = 0.53728*x**3 + 0.813198*x
    else:
        # Asymptotic approximation valid near the endpoints
        w = ctx.ln(2/ctx.pi/(abs(x)-1)**2)
        guess = ctx.sign(x) * ctx.sqrt(w - ctx.ln(w))/ctx.sqrt(2)
    ctx.prec += 10
    return ctx.findroot(lambda t: ctx.erf(t)-x, guess)
94
+
95
@defun_wrapped
def npdf(ctx, x, mu=0, sigma=1):
    """Normal (Gaussian) probability density at x with mean mu and
    standard deviation sigma."""
    sigma = ctx.convert(sigma)
    exponent = -(x-mu)**2/(2*sigma**2)
    normalizer = sigma*ctx.sqrt(2*ctx.pi)
    return ctx.exp(exponent) / normalizer
99
+
100
@defun_wrapped
def ncdf(ctx, x, mu=0, sigma=1):
    """Cumulative distribution function of the normal distribution
    with mean mu and standard deviation sigma."""
    t = (x-mu)/(sigma*ctx.sqrt(2))
    # Left tail: use erfc to avoid cancellation in 1+erf
    if t < 0:
        return ctx.erfc(-t)/2
    return (1+ctx.erf(t))/2
107
+
108
@defun_wrapped
def betainc(ctx, a, b, x1=0, x2=1, regularized=False):
    """Generalized incomplete beta integral from x1 to x2, evaluated
    through Gauss 2F1; optionally regularized by beta(a, b)."""
    if x1 == x2:
        v = 0
    elif not x1:
        if x1 == 0 and x2 == 1:
            # Complete integral
            v = ctx.beta(a, b)
        else:
            v = x2**a * ctx.hyp2f1(a, 1-b, a+1, x2) / a
    else:
        # a at or near a nonpositive integer makes the 1/a prefactor
        # singular; perturb a or boost precision to compensate.
        m, d = ctx.nint_distance(a)
        if m <= 0:
            if d < -ctx.prec:
                h = +ctx.eps
                ctx.prec *= 2
                a += h
            elif d < -4:
                ctx.prec -= d
        s1 = x2**a * ctx.hyp2f1(a,1-b,a+1,x2)
        s2 = x1**a * ctx.hyp2f1(a,1-b,a+1,x1)
        v = (s1 - s2) / a
    if regularized:
        v /= ctx.beta(a,b)
    return v
132
+
133
@defun
def gammainc(ctx, z, a=0, b=None, regularized=False):
    """Generalized incomplete gamma function: the integral of
    t**(z-1)*exp(-t) from a to b, optionally divided by gamma(z)."""
    regularized = bool(regularized)
    z = ctx.convert(z)
    if a is None:
        a = ctx.zero
        lower_modified = False
    else:
        a = ctx.convert(a)
        lower_modified = a != ctx.zero
    if b is None:
        b = ctx.inf
        upper_modified = False
    else:
        b = ctx.convert(b)
        upper_modified = b != ctx.inf
    # Complete gamma function
    if not (upper_modified or lower_modified):
        if regularized:
            # gamma(z)/gamma(z): 1 for Re(z) > 0, inf/nan at/left of the axis
            if ctx.re(z) < 0:
                return ctx.inf
            elif ctx.re(z) > 0:
                return ctx.one
            else:
                return ctx.nan
        return ctx.gamma(z)
    if a == b:
        return ctx.zero
    # Standardize
    if ctx.re(a) > ctx.re(b):
        return -ctx.gammainc(z, b, a, regularized)
    # Generalized gamma
    if upper_modified and lower_modified:
        return +ctx._gamma3(z, a, b, regularized)
    # Upper gamma
    elif lower_modified:
        return ctx._upper_gamma(z, a, regularized)
    # Lower gamma
    elif upper_modified:
        return ctx._lower_gamma(z, b, regularized)
173
+
174
@defun
def _lower_gamma(ctx, z, b, regularized=False):
    """Lower incomplete gamma function via hypercomb."""
    # Pole
    if ctx.isnpint(z):
        return type(z)(ctx.inf)
    # When regularized, divide out gamma(z) via the G slot of the term
    G = [z] * regularized
    negb = ctx.fneg(b, exact=True)
    def h(z):
        # hypercomb term: (alpha, alpha_pow, gamma_num, gamma_den, a, b, x)
        T1 = [ctx.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
        return (T1,)
    return ctx.hypercomb(h, [z])
185
+
186
@defun
def _upper_gamma(ctx, z, a, regularized=False):
    """Upper incomplete gamma function via hypercomb."""
    # Fast integer case, when available
    if ctx.isint(z):
        try:
            if regularized:
                # Gamma pole
                if ctx.isnpint(z):
                    return type(z)(ctx.zero)
                orig = ctx.prec
                try:
                    ctx.prec += 10
                    return ctx._gamma_upper_int(z, a) / ctx.gamma(z)
                finally:
                    ctx.prec = orig
            else:
                return ctx._gamma_upper_int(z, a)
        except NotImplementedError:
            pass
    # hypercomb is unable to detect the exact zeros, so handle them here
    if z == 2 and a == -1:
        return (z+a)*0
    if z == 3 and (a == -1-1j or a == -1+1j):
        return (z+a)*0
    nega = ctx.fneg(a, exact=True)
    # When regularized, divide out gamma(z) via the G slot of the terms
    G = [z] * regularized
    # Use 2F0 series when possible; fall back to lower gamma representation
    try:
        def h(z):
            r = z-1
            return [([ctx.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
        return ctx.hypercomb(h, [z], force_series=True)
    except ctx.NoConvergence:
        # Gamma(z) minus the lower incomplete gamma, each as a hypercomb term
        def h(z):
            T1 = [], [1, z-1], [z], G, [], [], 0
            T2 = [-ctx.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
            return T1, T2
        return ctx.hypercomb(h, [z])
224
+
225
@defun
def _gamma3(ctx, z, a, b, regularized=False):
    """Generalized incomplete gamma with both integration endpoints
    finite/nonzero, as a difference of one-sided incomplete gammas."""
    pole = ctx.isnpint(z)
    if regularized and pole:
        return ctx.zero
    try:
        ctx.prec += 15
        # We don't know in advance whether it's better to write as a difference
        # of lower or upper gamma functions, so try both
        T1 = ctx.gammainc(z, a, regularized=regularized)
        T2 = ctx.gammainc(z, b, regularized=regularized)
        R = T1 - T2
        # Accept the result only if not too many bits were lost to cancellation
        if ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
            return R
        if not pole:
            T1 = ctx.gammainc(z, 0, b, regularized=regularized)
            T2 = ctx.gammainc(z, 0, a, regularized=regularized)
            R = T1 - T2
            # May be ok, but should probably at least print a warning
            # about possible cancellation
            if 1: #ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
                return R
    finally:
        ctx.prec -= 15
    raise NotImplementedError
250
+
251
@defun_wrapped
def expint(ctx, n, z):
    """Generalized exponential integral E_n(z)."""
    # Fast path: specialized routine for integer order and real argument
    if ctx.isint(n) and ctx._is_real_type(z):
        try:
            return ctx._expint_int(n, z)
        except NotImplementedError:
            pass
    if ctx.isnan(n) or ctx.isnan(z):
        # Propagate nan with the combined type of the inputs
        return z*n
    if z == ctx.inf:
        return 1/z
    if z == 0:
        # integral from 1 to infinity of t^n
        if ctx.re(n) <= 1:
            # TODO: reasonable sign of infinity
            return type(z)(ctx.inf)
        else:
            return ctx.one/(n-1)
    if n == 0:
        return ctx.exp(-z)/z
    if n == -1:
        return ctx.exp(-z)*(z+1)/z**2
    # General case via the upper incomplete gamma function
    return z**(n-1) * ctx.gammainc(1-n, z)
274
+
275
@defun_wrapped
def li(ctx, z, offset=False):
    """Logarithmic integral li(z) = Ei(ln z). With offset=True,
    computes the offset logarithmic integral Li(z) = li(z) - li(2)."""
    if offset:
        if z == 2:
            # Li(2) = 0 by definition of the offset integral
            return ctx.zero
        return ctx.ei(ctx.ln(z)) - ctx.ei(ctx.ln2)
    if not z:
        return z
    if z == 1:
        # Logarithmic singularity at z = 1
        return ctx.ninf
    return ctx.ei(ctx.ln(z))
286
+
287
@defun
def ei(ctx, z):
    """Exponential integral Ei(z)."""
    # Prefer the context's specialized implementation; fall back to
    # the generic hypergeometric evaluation when unavailable.
    try:
        return ctx._ei(z)
    except NotImplementedError:
        return ctx._ei_generic(z)
293
+
294
@defun_wrapped
def _ei_generic(ctx, z):
    """Generic evaluation of the exponential integral Ei(z)."""
    # Note: the following is currently untested because mp and fp
    # both use special-case ei code
    if z == ctx.inf:
        return z
    if z == ctx.ninf:
        return ctx.zero
    if ctx.mag(z) > 1:
        # Large |z|: try the divergent asymptotic series first
        try:
            r = ctx.one/z
            v = ctx.exp(z)*ctx.hyper([1,1],[],r,
                maxterms=ctx.prec, force_series=True)/z
            im = ctx._im(z)
            # Branch-cut correction off the real axis
            if im > 0:
                v += ctx.pi*ctx.j
            if im < 0:
                v -= ctx.pi*ctx.j
            return v
        except ctx.NoConvergence:
            pass
    # Convergent series: Ei(z) = euler + log(z) + z*2F2(1,1;2,2;z)
    v = z*ctx.hyp2f2(1,1,2,2,z) + ctx.euler
    if ctx._im(z):
        # Half the difference picks the correct branch of log for complex z
        v += 0.5*(ctx.log(z) - ctx.log(ctx.one/z))
    else:
        v += ctx.log(abs(z))
    return v
321
+
322
@defun
def e1(ctx, z):
    """Exponential integral E_1(z)."""
    # Prefer the context's specialized implementation; otherwise
    # evaluate as the n=1 case of the generalized expint.
    try:
        return ctx._e1(z)
    except NotImplementedError:
        return ctx.expint(1, z)
328
+
329
@defun
def ci(ctx, z):
    """Cosine integral Ci(z)."""
    # Prefer the context's specialized implementation; fall back to
    # the generic evaluation in terms of Ei when unavailable.
    try:
        return ctx._ci(z)
    except NotImplementedError:
        return ctx._ci_generic(z)
335
+
336
@defun_wrapped
def _ci_generic(ctx, z):
    """Generic cosine integral via Ci(z) = (Ei(iz) + Ei(-iz))/2."""
    if ctx.isinf(z):
        if z == ctx.inf: return ctx.zero
        if z == ctx.ninf: return ctx.pi*1j
    jz = ctx.fmul(ctx.j,z,exact=True)
    njz = ctx.fneg(jz,exact=True)
    v = 0.5*(ctx.ei(jz) + ctx.ei(njz))
    zreal = ctx._re(z)
    zimag = ctx._im(z)
    # Branch corrections depending on the half-plane / axis of z
    if zreal == 0:
        if zimag > 0: v += ctx.pi*0.5j
        if zimag < 0: v -= ctx.pi*0.5j
    if zreal < 0:
        if zimag >= 0: v += ctx.pi*1j
        if zimag < 0: v -= ctx.pi*1j
    if ctx._is_real_type(z) and zreal > 0:
        # Result is real on the positive real axis; drop rounding residue
        v = ctx._re(v)
    return v
355
+
356
@defun
def si(ctx, z):
    """Sine integral Si(z)."""
    # Prefer the context's specialized implementation; fall back to
    # the generic evaluation in terms of Ei when unavailable.
    try:
        return ctx._si(z)
    except NotImplementedError:
        return ctx._si_generic(z)
362
+
363
@defun_wrapped
def _si_generic(ctx, z):
    """Generic sine integral via Si(z) = (Ei(iz) - Ei(-iz))/(2i)."""
    if ctx.isinf(z):
        if z == ctx.inf: return 0.5*ctx.pi
        if z == ctx.ninf: return -0.5*ctx.pi
    # Suffers from cancellation near 0
    if ctx.mag(z) >= -1:
        jz = ctx.fmul(ctx.j,z,exact=True)
        njz = ctx.fneg(jz,exact=True)
        v = (-0.5j)*(ctx.ei(jz) - ctx.ei(njz))
        zreal = ctx._re(z)
        # Constant offsets from the branch structure of Ei
        if zreal > 0:
            v -= 0.5*ctx.pi
        if zreal < 0:
            v += 0.5*ctx.pi
        if ctx._is_real_type(z):
            # Result is real for real z; drop rounding residue
            v = ctx._re(v)
        return v
    else:
        # Small |z|: use the convergent 1F2 series instead
        return z*ctx.hyp1f2((1,2),(3,2),(3,2),-0.25*z*z)
383
+
384
@defun_wrapped
def chi(ctx, z):
    """Hyperbolic cosine integral Chi(z) = (Ei(z) + Ei(-z))/2."""
    negz = ctx.fneg(z, exact=True)
    res = 0.5*(ctx.ei(z) + ctx.ei(negz))
    re_z = ctx._re(z)
    im_z = ctx._im(z)
    # Branch-cut corrections along the negative real axis
    if im_z > 0:
        res += ctx.pi*0.5j
    elif im_z < 0:
        res -= ctx.pi*0.5j
    elif re_z < 0:
        res += ctx.pi*1j
    return res
397
+
398
@defun_wrapped
def shi(ctx, z):
    """Hyperbolic sine integral Shi(z) = (Ei(z) - Ei(-z))/2."""
    if ctx.mag(z) < -1:
        # Small |z|: the Ei difference cancels badly, so use the
        # convergent 1F2 series instead
        return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z)
    negz = ctx.fneg(z, exact=True)
    res = 0.5*(ctx.ei(z) - ctx.ei(negz))
    im_z = ctx._im(z)
    # Branch-cut corrections off the real axis
    if im_z > 0: res -= 0.5j*ctx.pi
    if im_z < 0: res += 0.5j*ctx.pi
    return res
410
+
411
@defun_wrapped
def fresnels(ctx, z):
    """Fresnel sine integral S(z)."""
    # Limits at the two real infinities
    if z == ctx.inf:
        return ctx.mpf(0.5)
    if z == ctx.ninf:
        return ctx.mpf(-0.5)
    # S(z) = pi*z^3/6 * 1F2(3/4; 3/2, 7/4; -pi^2 z^4/16)
    arg = -ctx.pi**2*z**4/16
    return ctx.pi*z**3/6*ctx.hyp1f2((3,4),(3,2),(7,4),arg)
418
+
419
@defun_wrapped
def fresnelc(ctx, z):
    """Fresnel cosine integral C(z)."""
    # Limits at the two real infinities
    if z == ctx.inf:
        return ctx.mpf(0.5)
    if z == ctx.ninf:
        return ctx.mpf(-0.5)
    # C(z) = z * 1F2(1/4; 1/2, 5/4; -pi^2 z^4/16)
    arg = -ctx.pi**2*z**4/16
    return z*ctx.hyp1f2((1,4),(1,2),(5,4),arg)
lib/python3.11/site-packages/mpmath/functions/factorials.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ from .functions import defun, defun_wrapped
3
+
4
@defun
def gammaprod(ctx, a, b, _infsign=False):
    """Compute prod(gamma(x) for x in a) / prod(gamma(x) for x in b),
    taking limits so that poles at nonpositive integers cancel in pairs."""
    a = [ctx.convert(x) for x in a]
    b = [ctx.convert(x) for x in b]
    poles_num = []
    poles_den = []
    regular_num = []
    regular_den = []
    # Partition the arguments into pole (nonpositive integer) and regular parts
    for x in a: [regular_num, poles_num][ctx.isnpint(x)].append(x)
    for x in b: [regular_den, poles_den][ctx.isnpint(x)].append(x)
    # One more pole in numerator or denominator gives 0 or inf
    if len(poles_num) < len(poles_den): return ctx.zero
    if len(poles_num) > len(poles_den):
        # Get correct sign of infinity for x+h, h -> 0 from above
        # XXX: hack, this should be done properly
        if _infsign:
            a = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_num]
            b = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_den]
            return ctx.sign(ctx.gammaprod(a+regular_num,b+regular_den)) * ctx.inf
        else:
            return ctx.inf
    # All poles cancel
    # lim G(i)/G(j) = (-1)**(i+j) * gamma(1-j) / gamma(1-i)
    p = ctx.one
    orig = ctx.prec
    try:
        # Guard digits for the intermediate products
        ctx.prec = orig + 15
        while poles_num:
            i = poles_num.pop()
            j = poles_den.pop()
            p *= (-1)**(i+j) * ctx.gamma(1-j) / ctx.gamma(1-i)
        for x in regular_num: p *= ctx.gamma(x)
        for x in regular_den: p /= ctx.gamma(x)
    finally:
        ctx.prec = orig
    # Unary + rounds the result to the restored precision
    return +p
40
+
41
@defun
def beta(ctx, x, y):
    """Beta function B(x, y) = gamma(x)*gamma(y)/gamma(x+y)."""
    x = ctx.convert(x)
    y = ctx.convert(y)
    if ctx.isinf(y):
        # Normalize so that any infinite argument sits in x
        x, y = y, x
    if ctx.isinf(x):
        # Limits for an infinite first argument (real y only)
        if x == ctx.inf and not ctx._im(y):
            if y == ctx.ninf:
                return ctx.nan
            if y > 0:
                return ctx.zero
            if ctx.isint(y):
                return ctx.nan
            if y < 0:
                return ctx.sign(ctx.gamma(y)) * ctx.inf
        return ctx.nan
    # Compute x+y at doubled precision to protect gammaprod from
    # cancellation in the sum
    xy = ctx.fadd(x, y, prec=2*ctx.prec)
    return ctx.gammaprod([x, y], [xy])
60
+
61
@defun
def binomial(ctx, n, k):
    """Binomial coefficient C(n, k) = gamma(n+1)/(gamma(k+1)*gamma(n-k+1)),
    generalized to arbitrary arguments via gammaprod."""
    wp = 2*ctx.prec
    np1 = ctx.fadd(n, 1, prec=wp)
    kp1 = ctx.fadd(k, 1, prec=wp)
    nmk1 = ctx.fsub(np1, k, prec=wp)
    return ctx.gammaprod([np1], [kp1, nmk1])
67
+
68
@defun
def rf(ctx, x, n):
    """Rising factorial (Pochhammer symbol): gamma(x+n)/gamma(x)."""
    # Extra precision in the sum guards gammaprod against cancellation
    shifted = ctx.fadd(x, n, prec=2*ctx.prec)
    return ctx.gammaprod([shifted], [x])
72
+
73
@defun
def ff(ctx, x, n):
    """Falling factorial: gamma(x+1)/gamma(x-n+1)."""
    wp = 2*ctx.prec
    num = ctx.fadd(x, 1, prec=wp)
    den = ctx.fadd(ctx.fsub(x, n, prec=wp), 1, prec=wp)
    return ctx.gammaprod([num], [den])
78
+
79
@defun_wrapped
def fac2(ctx, x):
    """Double factorial x!!, generalized to arbitrary arguments
    through the gamma function."""
    if ctx.isinf(x):
        if x == ctx.inf:
            return x
        return ctx.nan
    # x!! = 2^(x/2) * (pi/2)^((cos(pi x)-1)/4) * gamma(x/2+1)
    return 2**(x/2)*(ctx.pi/2)**((ctx.cospi(x)-1)/4)*ctx.gamma(x/2+1)
86
+
87
@defun_wrapped
def barnesg(ctx, z):
    """Barnes G-function G(z)."""
    if ctx.isinf(z):
        if z == ctx.inf:
            return z
        return ctx.nan
    if ctx.isnan(z):
        return z
    # Exact zeros at the nonpositive integers
    if (not ctx._im(z)) and ctx._re(z) <= 0 and ctx.isint(ctx._re(z)):
        return z*0
    # Account for size (would not be needed if computing log(G))
    if abs(z) > 5:
        ctx.dps += 2*ctx.log(abs(z),2)
    # Reflection formula
    if ctx.re(z) < -ctx.dps:
        w = 1-z
        pi2 = 2*ctx.pi
        u = ctx.expjpi(2*w)
        v = ctx.j*ctx.pi/12 - ctx.j*ctx.pi*w**2/2 + w*ctx.ln(1-u) - \
            ctx.j*ctx.polylog(2, u)/pi2
        v = ctx.barnesg(2-z)*ctx.exp(v)/pi2**w
        if ctx._is_real_type(z):
            v = ctx._re(v)
        return v
    # Estimate terms for asymptotic expansion
    # TODO: fixme, obviously
    N = ctx.dps // 2 + 5
    # Shift z to the right using G(z+1) = gamma(z)*G(z) until the
    # asymptotic expansion applies
    G = 1
    while abs(z) < N or ctx.re(z) < 1:
        G /= ctx.gamma(z)
        z += 1
    z -= 1
    # Asymptotic (Stirling-type) series for log G(z+1)
    s = ctx.mpf(1)/12
    s -= ctx.log(ctx.glaisher)
    s += z*ctx.log(2*ctx.pi)/2
    s += (z**2/2-ctx.mpf(1)/12)*ctx.log(z)
    s -= 3*z**2/4
    z2k = z2 = z**2
    for k in xrange(1, N+1):
        t = ctx.bernoulli(2*k+2) / (4*k*(k+1)*z2k)
        if abs(t) < ctx.eps:
            #print k, N      # check how many terms were needed
            break
        z2k *= z2
        s += t
    #if k == N:
    #    print "warning: series for barnesg failed to converge", ctx.dps
    return G*ctx.exp(s)
135
+
136
@defun
def superfac(ctx, z):
    """Superfactorial sf(z) = G(z+2), with G the Barnes G-function."""
    return ctx.barnesg(z+2)
139
+
140
@defun_wrapped
def hyperfac(ctx, z):
    """Hyperfactorial H(z) = gamma(z+1)**z / G(z+1)."""
    # XXX: estimate needed extra bits accurately
    if z == ctx.inf:
        return z
    if abs(z) > 5:
        extra = 4*int(ctx.log(abs(z),2))
    else:
        extra = 0
    ctx.prec += extra
    # Negative integers: finite limit values, computed by reflection
    if not ctx._im(z) and ctx._re(z) < 0 and ctx.isint(ctx._re(z)):
        n = int(ctx.re(z))
        h = ctx.hyperfac(-n-1)
        if ((n+1)//2) & 1:
            h = -h
        if ctx._is_complex_type(z):
            return h + 0j
        return h
    zp1 = z+1
    # Wrong branch cut
    #v = ctx.gamma(zp1)**z
    #ctx.prec -= extra
    #return v / ctx.barnesg(zp1)
    # exp(z*loggamma) picks the correct branch of gamma(z+1)**z
    v = ctx.exp(z*ctx.loggamma(zp1))
    ctx.prec -= extra
    return v / ctx.barnesg(zp1)
166
+
167
+ '''
168
+ @defun
169
+ def psi0(ctx, z):
170
+ """Shortcut for psi(0,z) (the digamma function)"""
171
+ return ctx.psi(0, z)
172
+
173
+ @defun
174
+ def psi1(ctx, z):
175
+ """Shortcut for psi(1,z) (the trigamma function)"""
176
+ return ctx.psi(1, z)
177
+
178
+ @defun
179
+ def psi2(ctx, z):
180
+ """Shortcut for psi(2,z) (the tetragamma function)"""
181
+ return ctx.psi(2, z)
182
+
183
+ @defun
184
+ def psi3(ctx, z):
185
+ """Shortcut for psi(3,z) (the pentagamma function)"""
186
+ return ctx.psi(3, z)
187
+ '''