| from ..libmp.backend import xrange |
| from .calculus import defun |
|
|
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
| |
|
|
| |
def chebcoeff(ctx,f,a,b,j,N):
    # Degree-j Chebyshev coefficient of f on [a, b], computed by the
    # N-point Gauss-Chebyshev rule: c_j = (2/N) * sum f(x_k) T_j(t_k),
    # where t_k = cos(pi*(k-1/2)/N) and x_k maps t_k from [-1,1] to [a,b].
    half = ctx.mpf(0.5)
    total = ctx.mpf(0)
    for k in range(1, N+1):
        node = ctx.cospi((k-half)/N)
        total += f(node*(b-a)*half + (b+a)*half) * ctx.cospi(j*(k-half)/N)
    return 2*total/N
|
|
| |
def chebT(ctx, a=1, b=0):
    # Infinite generator of coefficient lists (ascending powers of x) for
    # the composed Chebyshev polynomials T_n(a*x + b), n = 0, 1, 2, ...
    # Uses the recurrence T_{n+1}(t) = 2*t*T_n(t) - T_{n-1}(t) with t = a*x+b.
    prev = [1]
    yield prev
    cur = [b, a]
    while True:
        yield cur
        # 2*(a*x+b)*T_n(x): shift-by-one handles the a*x factor, then add 2*b*T_n.
        nxt = [0] + [2*a*c for c in cur]
        for i, c in enumerate(cur):
            nxt[i] += 2*b*c
        # Subtract T_{n-1}.
        for i, c in enumerate(prev):
            nxt[i] -= c
        cur, prev = nxt, cur
|
|
@defun
def chebyfit(ctx, f, interval, N, error=False):
    r"""
    Computes a polynomial of degree `N-1` that approximates the
    given function `f` on the interval `[a, b]`. With ``error=True``,
    :func:`~mpmath.chebyfit` also returns an accurate estimate of the
    maximum absolute error; that is, the maximum value of
    `|f(x) - P(x)|` for `x \in [a, b]`.

    :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
    which gives a nearly optimal solution: that is, the maximum
    error of the approximating polynomial is very close to
    the smallest possible for any polynomial of the same degree.

    Chebyshev approximation is very useful if one needs repeated
    evaluation of an expensive function, such as function defined
    implicitly by an integral or a differential equation. (For
    example, it could be used to turn a slow mpmath function
    into a fast machine-precision version of the same.)

    **Examples**

    Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
    of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
        >>> nprint(poly)
        [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
        >>> nprint(err, 12)
        1.61351758081e-5

    The polynomial can be evaluated using ``polyval``::

        >>> nprint(polyval(poly, 1.6), 12)
        -0.0291858904138
        >>> nprint(cos(1.6), 12)
        -0.0291995223013

    Sampling the true error at 1000 points shows that the error
    estimate generated by ``chebyfit`` is remarkably good::

        >>> error = lambda x: abs(cos(x) - polyval(poly, x))
        >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
        1.61349954245e-5

    **Choice of degree**

    The degree `N` can be set arbitrarily high, to obtain an
    arbitrarily good approximation. As a rule of thumb, an
    `N`-term Chebyshev approximation is good to `N/(b-a)` decimal
    places on a unit interval (although this depends on how
    well-behaved `f` is). The cost grows accordingly: ``chebyfit``
    evaluates the function `(N^2)/2` times to compute the
    coefficients and an additional `N` times to estimate the error.

    **Possible issues**

    One should be careful to use a sufficiently high working
    precision both when calling ``chebyfit`` and when evaluating
    the resulting polynomial, as the polynomial is sometimes
    ill-conditioned. It is for example difficult to reach
    15-digit accuracy when evaluating the polynomial using
    machine precision floats, no matter the theoretical
    accuracy of the polynomial. (The option to return the
    coefficients in Chebyshev form should be made available
    in the future.)

    It is important to note the Chebyshev approximation works
    poorly if `f` is not smooth. A function containing singularities,
    rapid oscillation, etc can be approximated more effectively by
    multiplying it by a weight function that cancels out the
    nonsmooth features, or by dividing the interval into several
    segments.
    """
    a, b = ctx._as_points(interval)
    workprec = ctx.prec
    try:
        # Work with guard digits; the basis conversion below is mildly
        # ill-conditioned, so add roughly sqrt(N) extra digits.
        ctx.prec = workprec + int(N**0.5) + 20
        half = ctx.mpf(0.5)
        coeffs = [chebcoeff(ctx, f, a, b, k, N) for k in range(N)]
        # Accumulate the monomial-basis coefficients (ascending powers).
        poly = [ctx.zero] * N
        poly[0] = -coeffs[0]/2
        # Chebyshev polynomials composed with the affine map [a,b] -> [-1,1].
        basis = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
        for k, Tk in zip(range(N), basis):
            for i, t in enumerate(Tk):
                poly[i] += coeffs[k]*t
        # polyval expects the highest-degree coefficient first.
        poly.reverse()
        # Estimate the maximum error by sampling at Chebyshev nodes of [a,b].
        err = ctx.zero
        for k in range(N):
            x = ctx.cos(ctx.pi*k/N) * (b-a)*half + (b+a)*half
            err = max(err, abs(f(x) - ctx.polyval(poly, x)))
    finally:
        ctx.prec = workprec
    if error:
        return poly, +err
    else:
        return poly
|
|
@defun
def fourier(ctx, f, interval, N):
    r"""
    Computes the Fourier series of degree `N` of the given function
    on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
    two lists `(c, s)` of coefficients (the cosine series and sine
    series, respectively), such that

    .. math ::

        f(x) \sim \sum_{k=0}^N
            c_k \cos(k m x) + s_k \sin(k m x)

    where `m = 2 \pi / (b-a)`.

    Note that many texts define the first coefficient as `2 c_0` instead
    of `c_0`. The easiest way to evaluate the computed series correctly
    is to pass it to :func:`~mpmath.fourierval`.

    **Examples**

    The function `f(x) = x` has a simple Fourier series on the standard
    interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
    the function has odd symmetry), and the sine coefficients are
    rational numbers::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> c, s = fourier(lambda x: x, [-pi, pi], 5)
        >>> nprint(c)
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        >>> nprint(s)
        [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]

    This computes a Fourier series of a nonsymmetric function on
    a nonstandard interval::

        >>> I = [-1, 1.5]
        >>> f = lambda x: x**2 - 4*x + 1
        >>> cs = fourier(f, I, 4)
        >>> nprint(cs[0])
        [0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
        >>> nprint(cs[1])
        [0.0, -2.6255, 0.580905, 0.219974, -0.540057]

    It is instructive to plot a function along with its truncated
    Fourier series::

        >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP

    Fourier series generally converge slowly (and may not converge
    pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
    series gives an `L^2` error corresponding to 2-digit accuracy::

        >>> I = [-1, 1]
        >>> cs = fourier(cosh, I, 9)
        >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
        >>> nprint(sqrt(quad(g, I)))
        0.00467963

    :func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
    the accuracy (and speed) can be improved by including all singular
    points in the interval specification::

        >>> nprint(fourier(abs, [-1, 1], 0), 10)
        ([0.5000441648], [0.0])
        >>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
        ([0.5], [0.0])

    """
    points = ctx._as_points(interval)
    a = points[0]
    b = points[-1]
    length = b - a
    cos_series = []
    sin_series = []
    # Coefficients below ~10*eps are quadrature noise; snap them to zero.
    cutoff = ctx.eps*10
    for n in xrange(N+1):
        m = 2*n*ctx.pi/length
        an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), points)/length
        bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), points)/length
        if n == 0:
            # Halve c_0 so the series evaluates without a special case.
            an /= 2
        cos_series.append(ctx.zero if abs(an) < cutoff else an)
        sin_series.append(ctx.zero if abs(bn) < cutoff else bn)
    return cos_series, sin_series
|
|
@defun
def fourierval(ctx, series, interval, x):
    """
    Evaluates a Fourier series (in the format computed by
    :func:`~mpmath.fourier` for the given interval) at the point `x`.

    The series should be a pair `(c, s)` where `c` is the
    cosine series and `s` is the sine series. The two lists
    need not have the same length.
    """
    cs, ss = series
    ab = ctx._as_points(interval)
    # Angular frequency m = 2*pi/(b-a) of the fundamental mode.
    m = 2*ctx.pi/(ab[-1]-ab[0])
    s = ctx.zero
    # Skip zero coefficients to avoid pointless cos/sin evaluations
    # (fourier snaps negligible coefficients to exact zero).
    s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
    s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
    return s
|
|