%%-----------------------------------------------------------------------%%
%%--- Sequences and Series ----------------------------------------------%%

\chapter{Sequences and Series}

Our main goal in this chapter is to gain a working knowledge of power
series and Taylor series of functions, with just enough discussion of
the details of convergence to get by.


%%-----------------------------------------------------------------------%%
%%--- Sequences ---------------------------------------------------------%%

\section{Sequences}
\index{sequence}

What is the value of
\[
\lim_{n \to \infty} \frac{1}{2^n}?
\]
You may have encountered sequences long ago in earlier courses and
they seemed very difficult. You know much more mathematics now, so
they will probably seem easier. On the other hand, we are going to go
very quickly.

%\forclass{
%{\em We will completely skip several topics from Chapter 11.
%I will try to make what we skip clear.  Note that the homework
%has been modified to reflect the omitted topics.}
%}

A \emph{sequence} is an ordered list of numbers. These numbers may be
real, complex, etc., but in this book we will focus entirely on
sequences of real numbers. For example, here is a sequence of real
numbers:
\[
\frac{1}{2},\,
\frac{1}{4},\,
\frac{1}{8},\,
\frac{1}{16},\,
\frac{1}{32},\,
\frac{1}{64},\,
\frac{1}{128}, \ldots, \frac{1}{2^n},\dots
\]
Since the sequence is ordered, we can view it as a function whose
domain is the natural numbers $1,2,3,\dots$.

\begin{definition}
\textbf{Sequence.}
A \emph{sequence} $\{ a_n \}$ is a function $a : \nnn \to \rrr$
that takes a natural number $n$ to $a_n = a(n)$. The number $a_n$ is
the \emph{$n$-th term}.
\end{definition}

For example,
\[
a(n) = a_n = \frac{1}{2^n}
\]
which we write as $\{ 1/2^n \}$. Here is another example:
\[
\seq{b_n}{n}
=
\seq{\frac{n}{n + 1}} {n}
=
\frac{1}{2},\, \frac{2}{3},\, \frac{3}{4}, \dots
\]

\begin{example}
The Fibonacci sequence $\seq{F_n}{n}$ is defined recursively as
follows:
\[
F_1 = 1,\quad
F_2 = 1,\quad
F_n = F_{n-2} + F_{n-1} \quad\text{for $n \geq 3$}.
\]
\end{example}

Let's return to the sequence $\seq{ 1/2^n }{n}$. We write
$\lim_{n \to \infty} 1/2^n = 0$, since the terms get arbitrarily
small.

\begin{definition}
\textbf{Limit of sequence.}
\index{sequence!limit of}
If $\seq{a_n}{n}$ is a sequence, then it \emph{converges} to $L$,
written $\lim_{n \to \infty} a_n = L$, if $a_n$ gets arbitrarily close
to $L$ as $n$ gets sufficiently large.
\textsc{Secret rigorous definition:} For every $\varepsilon > 0$,
there exists $B$ such that for $n \geq B$ we have
$|a_n - L| < \varepsilon$.
\end{definition}

This is exactly like in differential calculus when we considered
limits of functions. If $f(x)$ is a function, the meaning of
$\lim_{x \to \infty} f(x) = L$ is essentially the same. Moreover, we
have the following fact.

\begin{proposition}
\label{prop:limserfun}
If $f$ is a function with $\lim_{x \to \infty} f(x) = L$ and
$\seq{a_n}{n}$ is a sequence given by $a_n = f(n)$, then
$\lim_{n \to \infty} a_n = L$.
\end{proposition}

As a corollary, note that this implies that all the facts about limits
that you know from functions also apply to sequences! For example,
\[
\lim_{n \to \infty} \frac{n}{n + 1}
=
\lim_{x \to \infty} \frac{x}{x + 1}
=
1.
\]

\begin{example}
The converse of Proposition~\ref{prop:limserfun} is false
\emph{in general}, i.e. knowing the limit of the sequence converges
does not imply that the limit of the function converges. We have
$\lim_{n \to \infty} \cos(2\pi n) = 1$, but
$\lim_{x \to \infty} \cos(2\pi x)$ diverges. The converse is OK if the
limit involving the function converges.
\end{example}

\begin{practice}
Compute $\ds \lim_{n \to \infty} \frac{n^3 + n + 5}{17n^3 - 2006n + 15}$.
%% {\em Answer: $\frac{1}{17}$.}
\end{practice}


%%-----------------------------------------------------------------------%%
%%--- Series ------------------------------------------------------------%%

\section{Series}
\label{sec:series}
\index{series}

What is the sum of the following expressions?
%
\begin{align*}
\frac{1}{2} + \frac{1}{4} + \frac{1}{8} + \frac{1}{16} + \frac{1}{32} +
\cdots \\[4pt]
\frac{1}{3} + \frac{1}{9} + \frac{1}{27} + \frac{1}{81} + \frac{1}{243} +
\cdots \\[4pt]
\frac{1}{1} + \frac{1}{4} + \frac{1}{9} + \frac{1}{16} + \frac{1}{25} +
\cdots
\end{align*}
%
Consider the following sequence of partial sums:
\[
a_N
=
\sum_{n=1}^N \frac{1}{2^n}
=
\frac{1}{2} + \frac{1}{4} + \cdots + \frac{1}{2^N}.
\]
Can we compute
\[
\sum_{n=1}^{\infty} \frac{1}{2^n}?
\]
These partial sums look as follows:
\[
a_1 = \frac{1}{2},\qquad
a_2 = \frac{3}{4},\qquad
a_{10} = \frac{1023}{1024},\qquad
a_{20} = \frac{1048575}{1048576}.
\]
It looks very likely that $\sum_{n=1}^{\infty} 1/2^n = 1$, if
it makes any sense. But does it? In a moment we will \emph{define}
\[
\sum_{n=1}^{\infty} \frac{1}{2^n}
=
\lim_{N \to \infty} \sum_{n=1}^N \frac{1}{2^n}
=
\lim_{N \to \infty} a_N.
\]
A little later, we will show that $a_{N} = \frac{2^N - 1}{2^N}$, hence
indeed $\sum_{n=1}^{\infty} 1/2^n = 1$.

\begin{definition}
\textbf{Sum of series.}
\index{series!sum of}
If $\seq{a_n}{n}$ is a sequence, then the \emph{sum of the series} is
\[
\sum_{n=1}^{\infty} a_n
=
\lim_{N \to \infty} \sum_{n=1}^N a_n
%% =
%% \lim_{N \to \infty} s_N
\]
provided the limit exists. Otherwise we say that
$\sum_{n=1}^{\infty} a_n$ \emph{diverges}.
\end{definition}

\begin{example}
\label{ex:geoser}
\textbf{Geometric series.}
\index{series!geometric}
Consider the \emph{geometric series}
$\sum_{n=1}^{\infty} a r^{n-1}$ for $a \neq 0$. Show that
\[
\sum_{n=1}^N a r^{n - 1}
=
\frac{a(1 - r^N)}{1 - r}.
\]
\end{example}

\begin{proof}[Solution]
Multiply both sides by $1 - r$ and notice that all the terms on the
left-hand side middle cancel out. For what values of $r$ does
$\lim_{N \to \infty} \frac{a(1 - r^N)}{1 - r}$ converge? If
$|r| < 1$, then $\lim_{N \to \infty} r^N = 0$ and
\[
\lim_{N \to \infty} \frac{a(1 - r^N)}{1 - r}
=
\frac{a}{1 - r}.
\]
If $|r|> 1$, then $\lim_{N \to \infty} r^N$ diverges, so
$\sum_{n=1}^{\infty} a r^{n - 1}$ diverges. If $r = 1$, then since
$a \neq 0$ the partial sums are $s_N = Na$, which diverge; if
$r = -1$, the partial sums alternate between $a$ and $0$, so the
series again diverges.

For example, if $a = 1/2$ and $r = 1/2$, we get
\[
\sum_{n=1}^{\infty} a r^{n - 1}
=
\frac{1/2}{1 - 1/2}
=
1
\]
as claimed earlier.
\end{proof}


%%-----------------------------------------------------------------------%%
%%--- Integral and comparison tests -------------------------------------%%

\section{Integral and comparison tests}
\index{integral test}
\index{comparison test}

Do the following series converge?
\[
\sum_{n=1}^{\infty} \frac{1}{n^2}
\qquad\text{and}\qquad
\sum_{n=1}^{\infty} \frac{1}{n}
\]
Recall that Section~\ref{sec:series} began by asking for the sum of
several series.  We found the first two sums~(which were geometric
series) by finding an exact formula for the sum $s_N$ of the first $N$
terms. The third series was
%
\begin{equation}
\label{eqn:zeta2}
A
=
\sum_{n=1}^{\infty} \frac{1}{n^2}
=
\frac{1}{1} + \frac{1}{4} + \frac{1}{9} + \frac{1}{16} + \frac{1}{25} +
\cdots
\end{equation}
%
It can be difficult to find a nice formula for the sum of the first
$n$ terms of this series.

\begin{remark}
Sums of the form~(\ref{eqn:zeta2}) have important applications in
other areas of mathematics. In general, for any $s > 1$ one can
consider the sum
\[
\zeta(s)
=
\sum_{n=1}^{\infty} \frac{1}{n^s}.
\]
The number $A$ that we are interested in above is thus $\zeta(2)$. The
function $\zeta(s)$ is called the \emph{Riemann zeta function}.
\index{Riemann zeta function} There is a natural (but complicated) way
of extending $\zeta(s)$ to a (differentiable) function on all complex
numbers with a pole at $s = 1$. The
\emph{Riemann Hypothesis}\index{Riemann Hypothesis} asserts that if
$s$ is a complex number and $\zeta(s) = 0$, then either $s$ is an even
negative integer or $s = 1/2 + bi$ for some real number $b$. This is
probably \emph{the} most famous unsolved problem in mathematics
(e.g. it is one of the
Clay Mathematics Institute\index{Clay Mathematics Institute} million
dollar prize problems). Another famous open problem is to show that
$\zeta(3)$ is not a root of any polynomial with integer coefficients
(it is a theorem of Ap{\'e}ry\index{Ap{\'e}ry's theorem} that
$\zeta(3)$ is not a fraction).

The function $\zeta(s)$ is incredibly important in mathematics
because it governs the properties of prime numbers. The
\emph{Euler product} representation of $\zeta(s)$ gives a hint as to
why this is the case:
\[
\zeta(s)
=
\sum_{n=1}^{\infty} \frac{1}{n^s}
=
\prod_{\text{\emph{primes} $p$}} \left( \frac{1}{1 - {p^{-s}}} \right).
\]
To see that this product equality holds for real $s > 1$,
use Example~\ref{ex:geoser} with $r = p^{-s}$ and
$a = 1$ above. We have
\[
\frac{1}{1 - p^{-s}}
=
1 + p^{-s} + p^{-2s} + \cdots
\]
Thus
%
\begin{align*}
\prod_{\text{\emph{primes} $p$}} \left( \frac{1}{1 - {p^{-s}}} \right)
&=
\prod_{\text{\emph{primes} $p$}} \left(
1 + \frac{1}{p^{s}} + \frac{1}{p^{2s}} + \cdots \right) \\
&=
\left( 1 + \frac{1}{2^{s}} + \frac{1}{2^{2s}} + \cdots \right) \cdot
\left( 1 + \frac{1}{3^{s}} + \frac{1}{3^{2s}} + \cdots \right) \cdots \\
&=
\left( 1 + \frac{1}{2^s} + \frac{1}{3^s} + \frac{1}{4^s} + \cdots \right) \\
&=
\sum_{n=1}^{\infty} \frac{1}{n^s}
\end{align*}
%
where the last line uses the distributive law and the property that
integers factor uniquely as a product of primes.
%Finally, Figure~\ref{fig:zetareal} is a graph $\zeta(x)$ as a
%function of a real variable $x$, and Figure~\ref{fig:zetaabs} is a graph
%% of $|\zeta(s)|$ for complex $s$.
%\fig{Riemann Zeta Function: $f(x)=\sum_{n=1}^{\infty} \frac{1}{n^x}$\label{fig:zetareal}}{real_zeta}
%\fig{Absolute Value of Riemann Zeta Function\label{fig:zetaabs}}{abs_zeta}
\end{remark}

This section is about how to leverage what you have learned so far in
this book to say something about sums that are hard~(or even
``impossibly difficult'') to evaluate exactly. For example, notice~(by
considering a graph of a step function) that if $f(x) = 1/x^2$, then
for positive integer $t$ we have
\[
\sum_{n=1}^t \frac{1}{n^2}
\leq
\frac{1}{1^2} + \int_{1}^{t} \frac{1}{x^2} \, dx.
\]
Thus
%
\begin{align*}
\sum_{n=1}^\infty \frac{1}{n^2}
&\leq
\frac{1}{1^2} + \int_{1}^{\infty} \frac{1}{x^2} \, dx \\
&=
1 + \lim_{t \to \infty} \int_1^t \frac{1}{x^2} \, dx \\
&=
1 + \lim_{t \to \infty} \left[ -\frac{1}{x} \right]_1^t \\
&=
1 + \lim_{t \to \infty} \left[ -\frac{1}{t} + \frac{1}{1} \right] \\
&=
2.
\end{align*}
%
We conclude that $\sum_{n=1}^{\infty} 1/n^2$ converges, since the
sequence of partial sums is getting bigger and bigger, but is always
$\leq 2$.
And of course we also know something about $\sum_{n=1}^\infty 1/n^2$
even though we do not know the exact value, i.e.
$\sum_{n=1}^\infty 1/n^2 \leq 2$. Using a computer, we find that
%
\begin{center}
\begin{tabular}{|l|l|}                         \hline
$t$      & $\sum_{n=1}^t 1/n^2$              \\\hline\hline
$1$      & $1$                               \\
$2$      & $5/4 = 1.25$                      \\
$5$      & $5269/3600 = 1.4636\overline{1}$  \\
$10$     & $1968329/1270080 = 1.54976773117$ \\
$100$    & $1.63498390018$                   \\
$1000$   & $1.64393456668$                   \\
$10000$  & $1.64483407185$                   \\
$100000$ & $1.6449240669$                    \\\hline
\end{tabular}
\end{center}
%
The table is consistent with the fact that $\sum_{n=1}^{\infty} 1/n^2$
converges to a number $\leq 2$. In fact Euler was the first to compute
$\sum_{n=1}^{\infty} 1/n^2$ exactly; he found that the exact value is
\[
\frac{\pi^2}{6}
=
1.644934066848226436472415166646025189218949901206798437735557\dots
\]
There are many proofs of this fact, but they do not belong in this
book. You can find them on the Internet and are likely to see one if
you take more math classes. We next consider the \emph{harmonic series}
\index{series!harmonic}
%
\begin{equation}
\label{eqn:harmonic}
\sum_{n=1}^{\infty} \frac{1}{n}.
\end{equation}
%
Does it converge? Again by inspecting a graph and viewing an infinite
sum as the area under a step function, we have
%
\begin{align*}
\sum_{n=1}^{\infty} \frac{1}{n}
&\geq
\int_1^{\infty} \frac{1}{x} \, dx \\
&=
\lim_{t \to \infty} \Bigl[ \ln(x) \Bigr]_1^t \\
&=
\lim_{t \to \infty} \ln(t) - 0 \\
&=
+\infty.
\end{align*}
%
Thus the infinite sum~(\ref{eqn:harmonic}) must also diverge.

We formalize the above two examples as a general test for convergence
or divergence of an infinite sum.

\begin{theorem}
\label{thm:inttest}
\textbf{Integral test and bound.}
\index{integral test}
Suppose $f(x)$ is a continuous, positive, decreasing function on
$[1, \infty)$ and let $a_n = f(n)$ for integers $n \geq 1$. Then the
series $\sum_{n=1}^{\infty} a_n$ converges if and only if the integral
$\int_1^{\infty} f(x) \, dx$ converges. More generally, for any
positive integer $k$,
%
\begin{equation}
\label{eqn:inttest}
\int_{k}^{\infty} f(x) \, dx
\quad\leq\quad
\sum_{n=k}^{\infty} a_n
\quad\leq\quad
a_k + \int_k^{\infty} f(x) \, dx.
\end{equation}
\end{theorem}

The theorem means that you can determine convergence of an infinite
series by determining convergence of a corresponding integral. Thus
you can apply the powerful tools you know already for integrals to
understanding infinite sums. Also, you can use integration along with
computation of the first few terms of a series to approximate a series
very precisely.

Sometimes the first few terms of a series are ``funny'' or the series
does not even start at $n = 1$, e.g.
\[
\sum_{n=4}^{\infty} \frac{1}{(n - 3)^3}.
\]
In this case, use~(\ref{eqn:inttest}) with any specific $k > 1$.

\begin{proposition}
\textbf{Comparison test.}
\index{comparison test}
Suppose $\sum a_n$ and $\sum b_n$ are two series with positive terms.
If $\sum b_n$ converges and $a_n \leq b_n$ for all $n$, then
$\sum a_n$ converges. Likewise, if $\sum b_n$ diverges and
$a_n \geq b_n$ for all $n$, then $\sum a_n$ must also diverge.
\end{proposition}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \frac{1}{\sqrt{n}}
\]
converge?
\end{example}

\begin{proof}[Solution]
We have
\[
\sum_{n=1}^{\infty} \frac{1}{\sqrt{n}}
\geq
\int_1^{\infty} \frac{1}{\sqrt{x}} \, dx
=
\lim_{t \to \infty} (2\sqrt{t} - 2\sqrt{1})
=
+\infty
\]
so the series in question does not converge.
\end{proof}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \frac{1}{n^2 + 1}
\]
converge?
\end{example}

\begin{proof}[Solution]
Let's apply the comparison test. We have $1 / (n^2+1) < 1/n^2$ for
every $n$, so
\[
\sum_{n=1}^{\infty} \frac{1}{n^2 + 1}
<
\sum_{n=1}^{\infty} \frac{1}{n^2}.
\]
Alternatively, we can use the integral test, which also gives as a
bonus upper and lower bounds on the sum. Let $f(x) = 1/(1 + x^2)$. We
have
%
\begin{align*}
\int_1^{\infty} \frac{1}{1 + x^2} \, dx
&=
\lim_{t \to \infty} \int_1^t \frac{1}{1 + x^2} \, dx \\
&=
\lim_{t \to \infty} \tan^{-1}(t) - \frac{\pi}{4} \\
&=
\frac{\pi}{2} - \frac{\pi}{4} \\
&=
\frac{\pi}{4}
\end{align*}
%
Thus the sum converges. Moreover, taking $k = 1$ in
Theorem~\ref{thm:inttest} we have
\[
\frac{\pi}{4}
\leq
\sum_{n=1}^{\infty} \frac{1}{n^2 + 1}
\leq
\frac{1}{2} + \frac{\pi}{4}.
\]
The actual sum is $1.07\dots$, which is much different from
$\sum 1/n^2 = 1.64\dots$.
\end{proof}

The following proposition can be proved using methods similar to those
illustrated in the examples above.
%Note that this is nicely illustrated
%in Figure~\ref{fig:zetareal}.

\begin{proposition}
The series $\sum_{n=1}^{\infty} 1/n^p$ is convergent if $p > 1$ and
divergent if $p \leq 1$.
\end{proposition}


\subsection{Estimating the sum of a series}
\index{series!estimating sum of}

Suppose $\sum a_n$ is a convergent series with positive terms. Let
\[
R_m
=
\sum_{n=1}^{\infty} a_n - \sum_{n=1}^{m} a_n
=
\sum_{n=m+1}^{\infty} a_n
\]
which is the error if you approximate $\sum a_n$ using the first $m$
terms. From Theorem~\ref{thm:inttest} we get the following.
%
\begin{proposition}
\textbf{Remainder bound.}
\index{remainder bound}
Suppose $f$ is a continuous, positive, decreasing function on
$[m, \infty)$ with $f(n) = a_n$, and $\sum a_n$ is convergent. Then
\[
\int_{m+1}^{\infty} f(x) \, dx
\leq
R_m
\leq
\int_m^{\infty} f(x) \, dx.
\]
\end{proposition}

\begin{proof}
In Theorem~\ref{thm:inttest}, set $k = m + 1$. That results in
\[
\int_{m+1}^{\infty} f(x) \, dx
\quad\leq\quad
\sum_{n=m+1}^{\infty} a_n
\quad\leq\quad
a_{m+1} + \int_{m+1}^{\infty} f(x) \, dx.
\]
But
\[
a_{m+1} + \int_{m+1}^{\infty} f(x) \, dx
\leq
\int_m^{\infty} f(x) \, dx
\]
since $f$ is decreasing and $f(m+1) = a_{m+1}$.
\end{proof}

\begin{example}
Estimate the series
\[
\zeta(3)
=
\sum_{n=1}^{\infty} \frac{1}{n^3}
\]
using its first 10 terms.
\end{example}

\begin{proof}[Solution]
We have
\[
\sum_{n=1}^{10} \frac{1}{n^3}
=
\frac{19164113947}{16003008000}
=
1.197531985674193\dots
\]
The proposition above with $m = 10$ tells us that
\[
0.00413223140495867\dots
=
\int_{11}^{\infty} \frac{1}{x^3} \, dx
\leq
\zeta(3) - \sum_{n=1}^{10} \frac{1}{n^3}
\leq
\int_{10}^{\infty} \frac{1}{x^3} \, dx
=
\frac{1}{2 \cdot 10^2}
=
\frac{1}{200}
=
0.005.
\]
In fact,
\[
\zeta(3)
=
1.202056903159594285399738161511449990\dots
\]
and we have
\[
\zeta(3) - \sum_{n=1}^{10} \frac{1}{n^3}
=
0.0045249174854010\dots
\]
so the integral error bound was really good in this case.
\end{proof}

\begin{example}
Determine if the series
\[
\sum_{n=1}^{\infty} \frac{2006}{117n^2 + 41n + 3}
\]
converges or diverges.
\end{example}

\begin{proof}[Solution]
It converges, since
\[
\frac{2006}{117n^2 + 41n + 3}
\leq
\frac{2006}{117n^2}
=
\frac{2006}{117} \cdot \frac{1}{n^2}
\]
and $\sum 1/n^2$ converges.
\end{proof}


%%-----------------------------------------------------------------------%%
%%--- Tests for convergence ---------------------------------------------%%

\section{Tests for convergence}


%%-----------------------------------------------------------------------%%
%%--- The comparison test -----------------------------------------------%%

\subsection{The comparison test}

\begin{theorem}
\label{thm:compare}
\textbf{The comparison test.}
\index{comparison test}
Suppose $\sum a_n$ and $\sum b_n$ are series with all $a_n$
and $b_n$ positive, and $a_n \leq b_n$ for each $n$.
%
\begin{enumerate}
\item If $\sum b_n$ converges, then so does $\sum a_n$.

\item If $\sum a_n$ diverges, then so does $\sum b_n$.
\end{enumerate}
\end{theorem}

\begin{proof}[Proof sketch]
The condition of the theorem implies that for
any $k$,
\[
\sum_{n=1}^{k} a_n
\leq
\sum_{n=1}^k b_n
\]
from which each claim follows.
\end{proof}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \frac{7}{3n^2 + 2n}
\]
converge or diverge?
\end{example}

\begin{proof}[Solution]
For each $n$ we have
\[
\frac{7}{3n^2 + 2n}
\leq
\frac{7}{3} \cdot \frac{1}{n^2}.
\]
Since $\sum_{n=1}^{\infty} 1/n^2$ converges, Theorem~\ref{thm:compare}
implies that $\sum_{n=1}^{\infty} 7 / (3n^2 + 2n)$ also converges.
\end{proof}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \frac{\ln(n)}{n}
\]
converge or diverge?
\end{example}

\begin{proof}[Solution]
It diverges since for each $n \geq 3$ we have
\[
 \frac{\ln(n)}{n}  \geq \frac{1}{n}
\]
and $\sum_{n=3}^{\infty} 1/n$ diverges.
\end{proof}


%%-----------------------------------------------------------------------%%
%%--- Absolute and conditional convergence ------------------------------%%

\subsection{Absolute and conditional convergence}

\begin{definition}
\textbf{Absolute convergence.}
\index{absolute convergence}
We say that $\sum_{n=1}^{\infty} a_n$ \emph{converges absolutely}
if $\sum_{n=1}^{\infty} |a_n|$ converges.
\end{definition}

For example,
\[
\sum_{n=1}^{\infty} (-1)^n \frac{1}{n}
\]
converges, but does \emph{not} converge absolutely (it converges
``conditionally'', though we will not explain why in this book).


%%-----------------------------------------------------------------------%%
%%--- The ratio test ----------------------------------------------------%%

\subsection{The ratio test}

Recall that $\sum_{n=1}^{\infty} a_n$ is a geometric series if and
only if $a_n = a r^{n-1}$ for some fixed~$a$ and~$r$. Here, we
call~$r$ the \emph{common ratio}.\index{common ratio} Notice that the
ratio of any two successive terms is $r$:
\[
\frac{a_{n+1}}{a_n}
=
\frac{a r^{n}}{a r^{n-1}}
=
r.
\]
Moreover, $\sum_{n=1}^{\infty} a r^{n-1}$ converges (to
$\frac{a}{1-r}$) if and only if $|r| < 1$ (and, of course it diverges
if $|r| \geq 1$). For example,
\[
\sum_{n=1}^{\infty} 3 \left( \frac{2}{3} \right)^{n-1}
\]
converges to $3 / (1 - 2/3) = 9$. However,
\[
\sum_{n=1}^{\infty} 3 \left( \frac{3}{2} \right)^{n-1}
\]
diverges.

\begin{theorem}
\label{thm:ratiotest}
\textbf{Ratio test.}
\index{ratio test}
Consider the sum $\sum_{n=1}^{\infty} a_n$.
%
\begin{enumerate}
\item If $\lim_{n \to \infty} \left| \frac{a_{n+1}}{a_n} \right| = L < 1$,
  then $\sum_{n=1}^{\infty} a_n$ is absolutely convergent.

\item If $\lim_{n \to \infty} \left| \frac{a_{n+1}}{a_n} \right| = L > 1$,
  then $\sum_{n=1}^{\infty} a_n$ diverges.

\item If $\lim_{n \to \infty} \left| \frac{a_{n+1}}{a_n} \right| = L = 1$,
  then we may conclude nothing from this.
\end{enumerate}
\end{theorem}

\begin{proof}
We will only prove part~1. Assume that we have
\[
\lim_{n \to \infty} \left| \frac{a_{n+1}}{a_n} \right|
=
L < 1.
\]
Let $r = \frac{L+1}{2}$ and notice that $L < r < 1$ (since
$0 \leq L < 1$, then $1 \leq L + 1 < 2$ so $1/2 \leq r < 1$, and also
$r - L = (L+1) / 2 - L = (1-L) / 2 > 0$). Since
\[
\lim_{n \to \infty} \left| \frac{a_{n+1}}{a_n} \right|
=
L
\]
there is an~$N$ such that for all $n > N$ we have
\[
\left| \frac{a_{n+1}}{a_n} \right| < r,
\quad\text{so}\quad
|a_{n+1}| < |a_n| \cdot r.
\]
Then we have
\[
\sum_{n=N+1}^{\infty} |a_n|
<
|a_{N+1}| \cdot \sum_{n=0}^{\infty} r^n.
\]
Here, the common ratio for the second one is $r < 1$, thus the
right-hand series converges, so the left-hand series converges as
well.
\end{proof}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \frac{(-10)^n}{n!}
\]
converge or diverge?
\end{example}

\begin{proof}[Solution]
The ratio of successive terms is
\[
\left| \frac{(-10)^{n+1} / (n+1)!} {(-10)^n / n!} \right|
=
\frac{10^{n+1}}{(n+1)n!} \cdot \frac{n!}{10^n}
=
\frac{10}{n+1} \longrightarrow 0 < 1.
\]
Thus this series converges \emph{absolutely}. Note, the minus sign is
missing above since in the ratio test we take the limit of the
absolute values.
\end{proof}

\begin{example}
\label{ex:27n}
Does the series
\[
\sum_{n=1}^{\infty} \frac{n^n}{3^{1+3n}}
\]
converge or diverge?
\end{example}

\begin{proof}[Solution]
We have
\[
\left| \frac{(n+1)^{n+1} / (3\cdot(27)^{n+1})} {n^n / 3^{1+3n} } \right|
=
\frac{(n+1)(n+1)^n}{27 \cdot 27^n} \cdot \frac{27^n}{n^n}
=
\frac{n+1}{27} \cdot \left( \frac{n+1}{n} \right)^n
\longrightarrow
+\infty.
\]
Thus our series diverges. Here, we use the fact that
$\left( \frac{n+1}{n} \right)^n \to e$.
\end{proof}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \frac{1}{n}
\]
converge or diverge?
\end{example}

\begin{proof}[Solution]
We have
\[
\lim_{n \to \infty} \left| \frac{1/(n+1)}{1/n} \right|
=
\lim_{n \to \infty} \frac{1}{n+1} \cdot \frac{n}{1}
=
\lim_{n \to \infty} \frac{n}{n+1}
=
1.
\]
This tells us nothing. If this happens, do something else, e.g. use
the integral test.
\end{proof}


%%-----------------------------------------------------------------------%%
%%--- The root test -----------------------------------------------------%%

\subsection{The root test}

Since $e$ and $\ln$ are inverses of each other, we have
$x = e^{\ln(x)}$.  This implies the very useful fact that
\[
x^a
=
e^{\ln(x^a)}
=
e^{a\ln(x)}.
\]
As a sample application, notice that for any $c > 0$,
\[
\lim_{n \to \infty} c^{\frac{1}{n}}
=
\lim_{n \to \infty} e^{\frac{1}{n} \ln(c)}
=
e^0
=
1.
\]
Similarly,
\[
\lim_{n \to \infty} n^{\frac{1}{n}}
=
\lim_{n \to \infty} e^{\frac{1}{n} \ln(n)}
=
e^0
=
1
\]
where we have used the fact $\lim_{n \to \infty} \frac{\ln(n)}{n} = 0$,
which can be proved using L'H{\^o}pital's rule.

\begin{theorem}
\textbf{Root test.}
\index{root test}
Consider the sum $\sum_{n=1}^{\infty} a_n$.
%
\begin{enumerate}
\item If $\lim_{n \to \infty} |a_n|^{1/n} = L < 1$, then
  $\sum_{n=1}^{\infty} a_n$ converges absolutely.

\item If $\lim_{n \to \infty} |a_n|^{1/n} = L > 1$, then
  $\sum_{n=1}^{\infty} a_n$ diverges.

\item If $L = 1$, then we may conclude nothing from this.
\end{enumerate}
\end{theorem}

\begin{proof}
We apply the comparison test~(Theorem~\ref{thm:compare}). First,
suppose $\lim_{n \to \infty} |a_n|^{1/n} = L < 1$, and choose $k$
with $L < k < 1$. Then there is an $N$ such that for $n \geq N$ we
have $|a_n|^{1/n} < k < 1$. Thus for such $n$, we have
$|a_n| < k^n < 1$.
The geometric series $\sum_{n=N}^{\infty} k^n$ converges, so
$\sum_{n=N}^{\infty} |a_n|$ converges as well by
Theorem~\ref{thm:compare}. Second, if $L > 1$, then there is an $N$
with $|a_n|^{1/n} > 1$ for $n \geq N$, and
we see that $\sum_{n=N}^{\infty} |a_n|$ diverges by comparing with
$\sum_{n=N}^{\infty} 1$.
\end{proof}
%The proof is similar is spirt to that of
%Theorem~\ref{thm:ratiotest} and will be omitted.

\begin{example}
Use the root test to conclude whether or not the series
\[
\sum_{n=1}^{\infty} a r^{n-1}
=
\frac{a}{r} \sum_{n=1}^{\infty} r^n
\]
is convergent or divergent.
\end{example}

\begin{proof}[Solution]
We have
\[
\lim_{n \to \infty} |r^n|^{1/n}
=
|r|.
\]
Thus the root test tells us exactly what we already know about
convergence of the geometric series (except when $|r| = 1$).
\end{proof}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \left( \frac{n^2+1}{2n^2+1} \right)^n
\]
converge or diverge?
\end{example}

\begin{proof}[Solution]
We have
\[
\lim_{n \to \infty} \left|
\left( \frac{n^2+1}{2n^2+1} \right)^n \right|^{1/n}
=
\lim_{n \to \infty} \frac{n^2+1}{2n^2+1}
=
\lim_{n \to \infty} \frac{1 + 1/n^2}{2 + 1/n^2}
=
\frac{1}{2}.
\]
Thus the series converges.
\end{proof}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \left( \frac{2n^2+1}{n^2+1} \right)^n
\]
converge or diverge?
\end{example}

\begin{proof}[Solution]
We have
\[
\lim_{n \to \infty} \left|
\left( \frac{2n^2+1}{n^2+1} \right)^n \right|^{1/n}
=
\lim_{n \to \infty} \frac{2n^2+1}{n^2+1}
=
\lim_{n \to \infty} \frac{2 + 1/n^2}{1 + 1/n^2}
=
2
\]
hence the series diverges.
\end{proof}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \frac{1}{n}
\]
converge or diverge?
\end{example}

\begin{proof}[Solution]
We have
\[
\lim_{n \to \infty} \left| \frac{1}{n} \right|^{1/n}
=
1
\]
so we cannot say anything about the convergence or otherwise of the
series.
\end{proof}

\begin{example}
Does the series
\[
\sum_{n=1}^{\infty} \frac{n^n}{3 \cdot (27^n)}
\]
converge or diverge?
\end{example}

\begin{proof}[Solution]
To apply the root test, we compute
\[
\lim_{n \to \infty} \left| \frac{n^n}{3 \cdot (27^n)} \right|^{1/n}
=
\lim_{n \to \infty} \left( \frac{1}{3}\right)^{1/n} \cdot \frac{n}{27}
=
+\infty.
\]
Again, the limit diverges as in Example~\ref{ex:27n}.
\end{proof}


%%-----------------------------------------------------------------------%%
%%--- Power series ------------------------------------------------------%%

\section{Power series}

Recall that a \emph{polynomial}\index{polynomial} is a function of the
form
\[
f(x)
=
c_0 + c_1 x + c_2 x^2 + \cdots + c_k x^k.
\]
%% \begin{center}
%% \shadowbox{\Large Polynomials are easy!!!}
%% \end{center}
Polynomials are easy to integrate, differentiate, etc.:
%
\begin{align*}
\frac{d}{dx} \left( \sum_{n=0}^k c_n x^n \right)
&=
\sum_{n=1}^k n c_n x^{n-1} \\
\int \sum_{n=0}^k c_n x^n \, dx
&=
C + \sum_{n=0}^k c_n \frac{x^{n+1}}{n+1}.
\end{align*}

\begin{definition}
\textbf{Power series.}
\index{power series}
A \emph{power series} is a series of the form
\[
\sum_{n=0}^{\infty} c_n x^n
=
c_0 + c_1 x + c_2 x^2 + \cdots
\]
where $x$ is a variable and the $c_n$ are coefficients.
\end{definition}

A power series is a function of $x$ for those $x$ for which the series
converges.

\begin{example}
Consider
\[
f(x)
=
\sum_{n=0}^{\infty} x^n
=
1 + x + x^2 + \cdots.
\]
When $|x| < 1$, i.e. $-1 < x < 1$, we have
\[
f(x) = \frac{1}{1-x}.
\]
\end{example}

But what good could this possibly be?  Why is writing the simple
function $\frac{1}{1-x}$ as the complicated series
$\sum_{n=0}^{\infty} x^n$ of any value?
%
\begin{enumerate}
\item Power series are \emph{relatively easy to work with}. They are
  ``almost'' polynomials. For example,
\[
\frac{d}{dx} \sum_{n=0}^{\infty} x^n
=
\sum_{n=1}^{\infty} nx^{n-1}
=
1 + 2x + 3x^2 + \cdots
=
\sum_{m=0}^{\infty} (m+1)x^m
\]
where in the last step we ``re-indexed'' the series. Power series are
only ``almost'' polynomials, since they do not stop; they can go on
forever.  More precisely, a power series is a limit of
polynomials. But in many cases we can treat them like a polynomial. On
the other hand, notice that
\[
\frac{d}{dx} \left( \frac{1}{1-x} \right)
=
\frac{1}{(1-x)^2}
=
\sum_{m=0}^{\infty} (m+1)x^m.
\]

\item For many functions, a power series is the
  \emph{best explicit representation available}. Consider $J_0(x)$,
  the Bessel function\index{Bessel function} of order $0$. It arises
  as a solution to the differential equation
  $x^2 y'' + x y' + x^2 y = 0$ and has the following power series
  expansion:
  %
  \begin{align*}
    J_0(x)
    &=
    \sum_{n=0}^{\infty} \frac{(-1)^n x^{2n}} {2^{2n}(n!)^2} \\
    &=
    1 - \frac{1}{4}x^{2} + \frac{1}{64}x^{4} - \frac{1}{2304}x^{6} +
    \frac{1}{147456}x^{8} - \frac{1}{14745600}x^{10} + \cdots.
  \end{align*}
  %
  This series is nice since it converges for all $x$~(one can prove this
  using the ratio test). It is also one of the most explicit forms of
  $J_0(x)$.
%sage: j = sum([(-1)^n * x^(2*n)/(2^(2*n)*factorial(n)^2) for n in range(20)]) + O(x^21)
%sage: x^2 * j.derivative().derivative()  + x * j.derivative() + x^2 * j
% 0
\end{enumerate}


\subsection{Shift the origin}

It is often useful to shift the origin of a power series, i.e. consider
a power series expanded about a different point.

\begin{definition}
The series
\[
\sum_{n=0}^{\infty} c_n (x-a)^n
=
c_0 + c_1(x-a) + c_2(x-a)^2 + \cdots
\]
is called a \emph{power series centered at} $x = a$ or
``a power series about $x = a$.''
\end{definition}

For example, consider
%
\begin{align*}
\sum_{n=0}^{\infty} (x-3)^n
&=
1 + (x-3) + (x-3)^2 + \cdots \\
&=
\frac{1}{1 - (x-3)} \qquad\qquad\text{equality holds when $|x-3| < 1$} \\
&=
\frac{1}{4-x}.
\end{align*}
Here, conceptually we are treating $3$ like we treated $0$ before.

Power series can be written in different ways, which have different
advantages and disadvantages. For example,
%
\begin{align*}
\frac{1}{4-x}
&=
\frac{1}{4} \cdot \frac{1}{1 - x/4} \\
&=
\frac{1}{4} \cdot \sum_{n=0}^{\infty}
\left( \frac{x}{4} \right)^n \qquad\text{converges for all $|x| < 4$}.
\end{align*}
%
Notice that the second series converges for $|x| < 4$, whereas the
first converges only for $|x-3| < 1$, which is not nearly as good.


\subsection{Convergence of power series}
\index{power series!convergence of}

\begin{theorem}
Given a power series $\sum_{n=0}^{\infty} c_n(x-a)^n$, there are
exactly three possibilities:
%
\begin{enumerate}
\item The series converges only when $x = a$.

\item The series converges for all $x$.

\item There is an $R > 0$ (called the \emph{radius of convergence})
  such that $\sum_{n=0}^{\infty} c_n(x-a)^n$ converges for
  $|x-a| < R$ and diverges for $|x-a| > R$.
\end{enumerate}
\end{theorem}

\begin{example}
For the power series $\sum_{n=0}^{\infty} x^n$, the radius of
convergence is $1$.
\end{example}

\begin{definition}
\textbf{Radius of convergence.}
\index{radius of convergence}
As mentioned in the theorem, $R$ is called the
\emph{radius of convergence}.
\end{definition}

If the series converges only at $x = a$, we say $R = 0$. If the series
converges everywhere, we say that $R = \infty$. The
\emph{interval of convergence}\index{interval of convergence} is the
set of $x$ for which the series converges. It will be one of the
following:
\[
(a-R,\, a+R),\qquad
[a-R,\, a+R),\qquad
(a-R,\, a+R],\qquad
[a-R,\, a+R].
\]
The point is that the statement of the theorem only asserts something
about convergence of the series on the open interval $(a-R,\, a+R)$.
What happens at the endpoints of the interval is not specified by the
theorem; you can only figure it out by looking explicitly at a given
series.

\begin{theorem}
If $\sum_{n=0}^{\infty} c_n (x-a)^n$ has radius of convergence $R > 0$,
then $f(x) = \sum_{n=0}^{\infty} c_n (x-a)^n$ is differentiable
on $(a-R,\, a+R)$ and
%
\begin{enumerate}
\item $\ds f'(x) = \sum_{n=1}^{\infty} n \cdot c_n (x-a)^{n-1}$

\item $\ds \int f(x) \, dx
  =
  C + \sum_{n=0}^{\infty} \frac{c_n}{n+1}(x-a)^{n+1}$.
\end{enumerate}
%
Both the derivative and integral have the same radius of convergence
as $f$.
\end{theorem}

\begin{example}
Find a power series representation for $f(x) = \tan^{-1}(x)$.
\end{example}

\begin{proof}[Solution]
Notice that
\[
f'(x)
=
\frac{1}{1+x^2}
=
\frac{1}{1- (-x^2)}
=
\sum_{n=0}^{\infty} (-1)^n x^{2n}
\]
which has radius of convergence $R = 1$, since the above series is
valid when $|-x^2| < 1$, i.e. $|x| < 1$. Now integrate to get
\[
f(x)
=
C + \sum_{n=0}^{\infty} (-1)^n \frac{x^{2n+1}}{2n+1}
\]
for some constant $C$. To find the constant, compute
$C = f(0) = \tan^{-1}(0) = 0$. Finally conclude that
\[
\sum_{n=0}^{\infty} (-1)^n \frac{x^{2n+1}}{2n+1}
\]
is a power series representation of $\tan^{-1}(x)$.
\end{proof}

\begin{example}
We will see later that the function $f(x) = e^{-x^2}$ has the power
series representation
\[
e^{-x^2}
=
1 - x^{2} + \frac{1}{2}x^{4} - \frac{1}{6}x^{6} + \cdots.
\]
Hence
\[
\int e^{-x^2} \, dx
=
C + x - \frac{1}{3}x^{3} + \frac{1}{10}x^{5} - \frac{1}{42}x^{7} + \cdots
\]
despite the fact that the antiderivative of $e^{-x^2}$ is not an
elementary function.% (see Example~\ref{ex:noant}).
\end{example}


%%-----------------------------------------------------------------------%%
%%--- Taylor series -----------------------------------------------------%%

\section{Taylor series}
\index{Taylor series}

\begin{example}
\label{ex:findpoly}
Suppose we have a cubic polynomial $p$ and we know that
$p(0) = 4$, $p'(0)=3$, $p''(0)=4$, and $p'''(0)=6$. Can we determine
$p$?
\end{example}

\begin{proof}[Solution]
We have
%
\begin{align*}
p(x) &= a + bx + cx^2 + dx^3 \\
p'(x) &= b + 2cx + 3dx^2 \\
p''(x) &= 2c + 6dx \\
p'''(x) &= 6d.
\end{align*}
%
From what we mentioned above, we have:
%
\begin{align*}
a &= p(0) = 4 \\
b &= p'(0) = 3 \\
c &= \frac{p''(0)}{2} = 2 \\
d &= \frac{p'''(0)}{6} = 1.
\end{align*}
%
Thus, the cubic polynomial that we seek is $p(x) = 4 + 3x + 2x^2 + x^3$.
\end{proof}

The idea of Example~\ref{ex:findpoly} can be used to compute power
series expansions of functions. For example, we will show below that
\[
   e^x = \sum_{n=0}^{\infty} \frac{x^n}{n!}.
\]
%% \begin{center}
%% \shadowbox{\Large Convergent series are determined by the values
%% of their derivatives.}
%% \end{center}
The key idea is that convergent series are determined by the values of
their derivatives.

Consider a general power series
\[
f(x)
=
\sum_{n=0}^{\infty} c_n (x-a)^n = c_0 + c_1 (x-a) + c_2 (x-a)^2 + \cdots.
\]
We have
%
\begin{align*}
c_0 &= f(a) \\
c_1 &= f'(a) \\
c_2 &= \frac{f''(a)}{2} \\
&\cdots \\
c_n &= \frac{f^{(n)}(a)}{n!}
\end{align*}
%
where for the last equality we use
\[
f^{(n)}(x)
=
n! c_n + (x-a)(\cdots + \cdots).
\]

\begin{remark}
The definition of $0!$ is $1$ (it is the empty product). The empty sum
is $0$ and the empty product is $1$.
\end{remark}

\begin{theorem}
\textbf{Taylor series.}
\index{Taylor series}
If $f(x)$ is a function that equals a power series centered about~$a$,
then that power series expansion is
%
\begin{align*}
f(x)
&=
\sum_{n=0}^{\infty} \frac{f^{(n)}(a)}{n!} (x-a)^n \\
&=
f(a) + f'(a)(x-a) + \frac{f''(a)}{2} (x-a)^2 + \cdots.
\end{align*}
\end{theorem}

\begin{remark}
WARNING: There are functions that have all derivatives defined, but do
not equal their Taylor expansion. For example, $f(x) = e^{-1/x^2}$ for
$x \neq 0$ and $f(0) = 0$. Its Taylor expansion is the $0$ series (which
converges everywhere), but it is not the $0$ function.
\end{remark}

\begin{definition}
\textbf{Maclaurin series.}
\index{Maclaurin series}
A \emph{Maclaurin series} is a Taylor series with $a = 0$.
\end{definition}

From here on, we will not use the term ``Maclaurin series'' again,
even though it is common in textbooks.

\begin{example}
Find the Taylor series for $f(x) = e^x$ about $a = 0$.
\end{example}

\begin{proof}[Solution]
We have $f^{(n)}(x) = e^x$. Thus $f^{(n)}(0) = 1$ for all $n$. Hence
\[
e^x
=
\sum_{n=0}^{\infty} \frac{1}{n!} x^n
=
1 + x + \frac{x^2}{2} + \frac{x^3}{6} + \cdots.
\]
What is the radius of convergence? Use the ratio test:
%
\begin{align*}
\lim_{n \to \infty}
\left| \frac{ \frac{1}{(n+1)!} x^{n+1}} {\frac{1}{n!} x^n} \right|
&=
\lim_{n \to \infty} \frac{n!}{(n+1)!}|x| \\
&=
\lim_{n \to \infty} \frac{|x|}{n+1} \\
&=
0, \qquad\text{for any fixed $x$}.
\end{align*}
%
Thus the radius of convergence is $\infty$.
\end{proof}

\begin{example}
\label{ex:taysin}
Find the Taylor series of $f(x) = \sin(x)$ about
$x = \frac{\pi}{2}$.\footnote{
This expansion was first found in India by Madhava of
Sangamagrama~(1350--1425).\index{Madhava of Sangamagrama}}
\end{example}

\begin{proof}[Solution]
We have
\[
f(x)
=
\sum_{n=0}^{\infty} \frac{f^{(n)} \left( \frac{\pi}{2} \right)}{n!}
\left( x - \frac{\pi}{2} \right)^n.
\]
To do this, we have to \emph{puzzle out a pattern}:
%
\begin{align*}
f(x) &= \sin(x) \\
f'(x) &= \cos(x) \\
f''(x) &= -\sin(x) \\
f'''(x) &= -\cos(x) \\
f^{(4)}(x) &= \sin(x)
\end{align*}
%
First, notice how the signs behave. For $n = 2m$ even,
\[
f^{(n)}(x)
=
f^{(2m)}(x)
=
(-1)^{m} \sin(x)
=
(-1)^{n/2} \sin(x)
\]
and for $n = 2m + 1$ odd,
\[
f^{(n)}(x)
=
f^{(2m+1)}(x)
=
(-1)^{m} \cos(x)
=
(-1)^{(n-1)/2} \cos(x).
\]
For $n=2m$ even we have
\[
f^{(n)}(\pi/2)
=
f^{(2m)}\left(\frac{\pi}{2}\right) = (-1)^m
\]
and for $n=2m+1$ odd we have
\[
f^{(n)}(\pi/2)
=
f^{(2m+1)} \left( \frac{\pi}{2} \right)
=
(-1)^m\cos(\pi/2) = 0.
\]
Finally,
%
\begin{align*}
\sin(x)
&=
\sum_{n=0}^{\infty} \frac{f^{(n)}(\pi/2)}{n!}(x-\pi/2)^n \\
&=
\sum_{m=0}^{\infty} \frac{(-1)^{m}}{(2m)!}
\left( x - \frac{\pi}{2} \right)^{2m}.
\end{align*}
%
Next we use the ratio test to compute the radius of convergence. We
have
%
\begin{align*}
\lim_{m \to \infty}
\frac{\ds \left|
  \frac{(-1)^{m+1}}{(2(m+1))!}
  \left(x - \frac{\pi}{2}\right)^{2(m+1)} \right|}
{\ds \left|
  \frac{(-1)^{m}}{(2m)!}
  \left( x - \frac{\pi}{2} \right)^{2m} \right|}
&=
\lim_{m \to \infty} \frac{(2m)!}{(2m+2)!}
\left( x - \frac{\pi}{2} \right)^2 \\
&=
\lim_{m \to \infty} \frac{\left( x - \frac{\pi}{2} \right)^2}{(2m+2)(2m+1)}
\end{align*}
%
which equals $0 < 1$ for each fixed $x$, so the ratio test gives
convergence for every $x$. Hence $R = \infty$.
\end{proof}

\begin{example}
Find the Taylor series for $\cos(x)$ about $a = 0$.
\end{example}

\begin{proof}[Solution]
We have $\cos(x) = \sin(x + \pi/2)$. Thus, by Example~\ref{ex:taysin}
(whose series has infinite radius of convergence) and the uniqueness of
the Taylor expansion, we have
%
\begin{align*}
\cos(x)
&=
\sin \left( x + \frac{\pi}{2} \right) \\
&=
\sum_{n=0}^{\infty} \frac{(-1)^{n}}{(2n)!}
\left( x + \frac{\pi}{2} - \frac{\pi}{2} \right)^{2n} \\
&=
\sum_{n=0}^{\infty} \frac{(-1)^{n}}{(2n)!} x^{2n}
\end{align*}
as required.
\end{proof}


%%-----------------------------------------------------------------------%%
%%--- Applications of Taylor series -------------------------------------%%

\section{Applications of Taylor series}

This section is about an example in the theory of
relativity.\index{relativity} Let $m$ be the (relativistic) mass of an
object and $m_0$ the mass at rest~(rest mass) of the object. Let $v$
be the velocity of the object relative to the observer and let $c$ be
the speed of light. These three quantities are related as follows:
\[
m
=
\frac{m_0}{\sqrt{1 - v^2/c^2}} \qquad\text{(relativistic) mass}.
\]
The total energy of the object is $mc^2$, hence the famous equation
%
%% \begin{center}
%% \shadowbox{\LARGE $
\[
E = mc^2.
\]
%% $}
%% \end{center}
In relativity, we define the kinetic energy\index{kinetic energy} to be
%
\begin{equation}
\label{eqn:kinetic}
K
=
mc^2 - m_0 c^2.
\end{equation}
%
\emph{What?} Isn't the kinetic energy $\frac{1}{2} m_0 v^2$?

Notice that
\[
mc^2 - m_0 c^2
=
\frac{m_0 c^2}{\sqrt{1 - v^2/c^2}} - m_0 c^2
=
m_0 c^2 \left[ \left(1 - \frac{v^2}{c^2}\right)^{-1/2} - 1 \right].
\]
Let
\[
f(x)
=
(1 - x)^{-1/2} - 1.
\]
Let's compute the Taylor series of $f$. We have
%
\begin{align*}
f(x) &= (1 - x)^{-1/2} - 1 \\
f'(x) &= \frac{1}{2}(1 - x)^{-3/2} \\
f''(x) &= \frac{1}{2} \cdot \frac{3}{2} (1 - x)^{-5/2} \\
f^{(n)}(x) &= \frac{1 \cdot 3 \cdot 5 \cdots (2n-1)}{2^n}
(1-x)^{-(2n + 1) / 2}.
\end{align*}
%
Thus
\[
f^{(n)}(0)
=
\frac{1 \cdot 3 \cdot 5 \cdots (2n - 1)}{2^n}.
\]
Hence
%
\begin{align*}
f(x)
&=
\sum_{n=1}^{\infty} \frac{f^{(n)}(0)}{n!} x^n \\
&=
\sum_{n=1}^{\infty} \frac{1 \cdot 3 \cdot 5 \cdots (2n-1)}{2^n \cdot n!} x^n \\
&=
\frac{1}{2}x + \frac{3}{8}x^2 + \frac{5}{16}x^3 + \frac{35}{128} x^4 + \cdots.
\end{align*}
%
We now use this to analyze the kinetic energy~(\ref{eqn:kinetic}):
%
\begin{align*}
mc^2 - m_0 c^2
&=
m_0 c^2 \cdot f \left( \frac{v^2}{c^2} \right) \\
&=
m_0 c^2 \cdot \left( \frac{1}{2} \cdot \frac{v^2}{c^2} +
\frac{3}{8} \cdot \frac{v^4}{c^4} + \cdots \right) \\
&=
\frac{1}{2} m_0 v^2 + m_0 c^2 \cdot
\left( \frac{3}{8} \frac{v^4}{c^4} + \cdots \right).
\end{align*}
%
We can ignore the higher order terms if $v^2 / c^2$ is small. But how
small is ``small'' enough, given that $v^2 / c^2$ appears in an
infinite sum?


\subsection{Estimation of Taylor series}
\index{Taylor series!estimation of}

Suppose
\[
f(x)
=
\sum_{n=0}^{\infty} \frac{f^{(n)}(a)}{n!}(x-a)^n
\]
and write
\[
R_N(x)
=
f(x) - \sum_{n=0}^{N} \frac{f^{(n)}(a)}{n!} (x-a)^n.
\]
We call
\[
T_N(x)
=
\sum_{n=0}^{N} \frac{f^{(n)}(a)}{n!} (x-a)^n
\]
the $N$-th degree \emph{Taylor polynomial}.\index{Taylor polynomial}
Notice that
\[
\lim_{N \to \infty} T_N(x)
=
f(x)
\]
if and only if
\[
\lim_{N \to \infty} R_N(x)
=
0.
\]
We would like to estimate $f(x)$ with $T_N(x)$. We need an estimate
for $R_N(x)$.

\begin{theorem}
\label{thm:taylor}
\textbf{Taylor's theorem.}
\index{Taylor's theorem}
If $\left| f^{(N+1)}(x) \right| \leq M$ for $|x - a| \leq d$, then
\[
|R_N(x)|
\leq
\frac{M}{(N+1)!} |x - a|^{N + 1} \qquad\text{for $|x - a| \leq d$}.
\]
\end{theorem}

For example, if $N = 0$, this says that
\[
|R_0(x)|
=
|f(x) - f(a)| \leq M |x - a|
\]
that is,
\[
\left| \frac{f(x) - f(a)}{x - a} \right|
\leq
M
\]
which should look familiar (the mean value theorem).
\index{mean value theorem}


\subsubsection*{Applications}

\begin{enumerate}
\item One can use Theorem~\ref{thm:taylor} to prove that functions
  converge to their Taylor series.

\item Returning to the relativity example above, we apply Taylor's
  theorem with $N = 1$ and $a = 0$. With $x = v^2/c^2$ and $M$ any
  number such that $|f''(x)| \leq M$, we have
  \[
  |R_1(x)| \leq \frac{M}{2}x^2.
  \]
  For example, if we assume that $|v| \leq 100m/s$ we use
  \[
  |f''(x)|
  \leq
  \frac{3}{4}(1-100^2 / c^2)^{-5/2}
  =
  M.
  \]
  Using $c = 3 \times 10^8 m/s$, we get
  \[
  |R_1(x)|
  \leq
  4.17 \cdot 10^{-10} \cdot m_0.
  \]
  Thus for $v \leq 100~\text{m/s} \approx 225~\text{mph}$, the error in
  throwing away the relativistic correction is on the order of
  $10^{-10}\, m_0$.  This is like
  200 feet out of the distance to the sun (93 million miles). So
  relativistic and Newtonian kinetic energies are almost the same for
  reasonable speeds.
\end{enumerate}
