%%%%(c) COPYRIGHT NOTICE%FOLDUP
%%%%(c)
%%%%(c)  This file is a portion of the source for the textbook
%%%%(c)
%%%%(c)    Numerical Methods
%%%%(c)    Copyright (C) 2004--2010 Steven E. Pav
%%%%(c)
%%%%(c)  See the file COPYING for copying conditions
%%%%(c)
%%%%(c)%UNFOLD

%%throat clearing%FOLDUP
\typeout{-- oldexams.tex}
% console banner; the copyright marker on this line was mis-encoded in the
% original source (``N©''), restored to a plain ASCII ``(c)''.
\typeout{-- (c) 2004-2010 Steven E. Pav}
%UNFOLD

%%local commands%FOLDUP
% Running totals for the current exam.  The *atend counters are declared
% alongside so a closing summary can snapshot the totals if desired.
\newcounter{totalpoints}
\newcounter{pointsatend}
\newcounter{totalprobs}
\newcounter{probsatend}

% \addpoints{n}: add n points to the running point total (no output).
\newcommand{\addpoints}[1]{\addtocounter{totalpoints}{#1}}
% \pntorpnts{n}: typeset ``n pnt'' or ``n pnts'' with the correct
% singular/plural form; produces nothing when n is zero.
\providecommand{\pntorpnts}[1]{\ifcase#1\or #1 pnt\else #1 pnts\fi\relax}
% \points{n}: record n points in the running total, and typeset them,
% parenthesized and bold.  (\textbf replaces the obsolete {\bf ...} form.)
\newcommand{\points}[1]{\addpoints{#1}(\textbf{\pntorpnts{#1}})}
% \shpn{n}: advance the problem counter and typeset the problem label,
% e.g. ``P3 (n pnts)''; used as the item label by \prblm below.
\newcommand{\shpn}[1]{\stepcounter{enumi}P\arabic{enumi} \points{#1}}

% The problems environment wraps a compactenum; each problem is begun with
% \prblm{points}.  On exit, totalprobs and totalpoints are refstepped (net
% change zero) so a following \label can refer to the exam's totals.
% Trailing % on each line of the end-code suppresses spurious spaces.
\newenvironment{problems}{\begin{compactenum}}{%
\end{compactenum}%
\setcounter{totalprobs}{\value{enumi}}%
\addtocounter{totalprobs}{-1}\refstepcounter{totalprobs}%
\addpoints{-1}\refstepcounter{totalpoints}%
}
%\addtocounter{totalprobs}{-1}\refstepcounter{totalprobs}\label{totalNumProbs}
%\addpoints{-1}\refstepcounter{totalpoints}\label{pointTotal}
% \prblm{n}: start a new problem worth n points.
\newcommand{\prblm}[1]{\item[\shpn{#1}]}
%UNFOLD

\chapter{Old Exams}

\section{First Midterm, Fall 2003}%FOLDUP

\begin{problems}
\prblm{7}%
Clearly state Taylor's Theorem for $f(x+h).$  Include all hypotheses, and state
the conditions on the variable that appears in the error term.
%
\prblm{7}%
Write out the Lagrange Polynomial $\ell_0(x)$ for nodes $x_0,x_1,\ldots,x_n.$
That is, write the polynomial that has value $1$ at $x_0$ and has value $0$ at
$x_1,x_2,\ldots,x_n.$
%
\prblm{7}%
Write the Lagrange form of the polynomial of lowest degree that interpolates
the following values:
$$\begin{array}{c||c|c}
x_k & -1 & 1 \\
\hline
y_k & 11 & 17 
\end{array}
$$
%
\prblm{14}%
Use the method of Bisection for finding the zero of the function $f(x) = x^3 -
3x^2 + 5x - \frac{5}{2}$. Let $a_0 = 0,$ and $b_0 = 2.$  What are $a_2,b_2$?
%
\prblm{21}%
\begin{compactitem}
\item Write out the iteration step of Newton's Method for finding a zero of the
function $f(x)$.  Your iteration should look like
$$x_{k+1} = \,??$$
\item Write the Newton's Method iteration for finding $\sqrt{5}.$
\item Letting $x_0 = 1,$ find the $x_1$ and $x_2$ for your iteration.
Note that $\sqrt{5} \approx 2.236$.
\end{compactitem}
%
\prblm{7}%
Write out the iteration step of the Secant Method for finding a zero of the
function $f(x)$.  Your iteration should look like
$$x_{k+1} = \,??$$
%
\prblm{21}%
Consider the following approximation:
$$f'(x) \approx \frac{f(x+h) - 5 f(x) + 4 f(x+\half[h])}{3h}$$
\begin{compactitem}
\item Derive this approximation using Taylor's Theorem.  
\item Assuming that $f(x)$ has bounded derivatives, give the accuracy of the
above approximation.  Your answer should be something like $\bigo{h^{?}}$.
\item Let $f(x) = x^2.$  Approximate $f'(0)$ with this approximation, using
$h=\oneby{3}.$
\end{compactitem}
%
\prblm{21}%
Consider the data:
$$\begin{array}{c||c|c|c}
k & 0 & 1 & 2 \\
\hline
\hline
x_k & 3 & 1 & -1 \\
\hline
f(x_k) & 5 & 15 & 9
\end{array}
$$
\begin{compactitem}
\item Construct the divided differences table for the data.
\item Construct the Newton form of the polynomial $p(x)$ of lowest degree that interpolates $f(x)$ at these nodes.
\item Suppose that these data were generated by the function $f(x) = x^3 -5x^2 +2x + 17.$
Find a numerical upper bound on the error $\abs{p(x) - f(x)}$ over the interval
\ccinv{-1}{3}.  Your answer should be a number. 
\end{compactitem}
%\end{enumerate}
\end{problems}
%Use three terms of a Taylor's expansion to approximate
%$$\cos\Parens{\pi/3 + 0.001}.$$
%You may use the fact that $\frac{\sqrt{3}}{2} = 0.866025403\ldots$
%How good of an approximation is your answer (how many digits do you think are
%correct)?
%

%%UNFOLD
\section{Second Midterm, Fall 2003}%FOLDUP

\begin{problems}
\prblm{6}%
Write the Trapezoid rule approximation for $\int_a^b f(x)\dx,$ using the nodes
\setBIdx{x_i}{i=0}{n}.
%
\prblm{6}%
Suppose Gaussian Elimination with scaled partial pivoting is applied to the
matrix
$$\Mtx{A} = \Bracks{\begin{array}{rrrr}
0.0001 & -0.1 & -0.01 & 0.001\\
43 & 91 & -43 & 1\\
-12 & 26 & -1 & 0\\
-7 & \half & -10 & \pi
\end{array}}.$$
Which row contains the first pivot?  You {\bf must} show your work.
%
\prblm{14}%
Let $\phi(n)$ be some quadrature rule that divides an interval into $2^n$ equal
subintervals.  Suppose
$$\int_a^b f(x) \dx = \phi(n) + a_5 h_n^5 + a_{10} h_n^{10} + a_{15} h_n^{15} + \ldots,$$
where $h_n = \frac{b-a}{2^n}.$  Using evaluations of $\phi(\cdot)$, devise a
``Romberg-style'' quadrature rule that is a \bigo{h_n^{10}} approximation to the
integral.
%
\prblm{14}%
Compute the LU factorization of the matrix
$$\Mtx{A} = \Bracks{\begin{array}{rrr}
1 & 0 & -1\\
0 & 2 & 0\\
2 & 2 & -1\\
\end{array}}$$
using \naive Gaussian Elimination.  Show all your work.  Your final answer
should be two matrices, \Mtx{L} and \Mtx{U}.  Verify your answer, \ie
verify that $\Mtx{A} = \Mtx{L}\Mtx{U}.$
%
\prblm{14}%
Run one iteration of either Jacobi or Gauss Seidel on the system:
$$\Bracks{\begin{array}{rrr}
1 & 3 & 4\\
-2 & 2 & 1\\
-1 & 3 & -2\\
\end{array}} \vect{x} = 
\Bracks{\begin{array}{r} -3\\-3\\1\end{array}}
$$
Start with $\vect{x^{(0)}} = \trans{\Bracks{1 \, 1\, 1}}.$
Clearly indicate your answer $\vect{x^{(1)}}.$  Show your work.
You {\bf must} indicate \emph{which} of the two methods you are using.
%
\prblm{21}%
Derive the two-node \emph{Chebyshev Quadrature} rule:
$$\int_{-1}^1 f(x) \dx \approx c \Bracks{f(x_0) + f(x_1)}.$$
Make this rule exact for all polynomials of degree $\le 2$.
%
\prblm{28}%
Consider the quadrature rule:
$$\int_0^1 f(x) \dx \approx z_0 f(0) + z_1 f(1) + z_2 f'(0) + z_3 f'(1).$$
%We want this rule to be exact for polynomials of the highest degree possible.
\begin{compactitem}
\item Write four linear equations involving $z_i$ to make this rule exact for
polynomials of the highest possible degree.
\item Solve your system for \vect{z} using \naive Gaussian Elimination.
\end{compactitem}
%
\end{problems}
%UNFOLD
\section{Final Exam, Fall 2003}%FOLDUP

\providecommand{\fclas}{\ensuremath{\mathcal{F}}\xspace}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{problems}
\prblm{4}%
Clearly state Taylor's Theorem for $f(x+h).$  Include all hypotheses, and state
the conditions on the variable that appears in the error term.
%
%\item[\shpn{4}]%
%Write out the form of the error $e_n(x) = f(x) - p_n(x),$ where $p_n(x)$ is the
%polynomial of degree $\le n$ interpolating $f(x)$ on nodes
%$x_0,x_1,\ldots,x_n.$
%
\prblm{4}%
State the conditions that characterize $S(x)$ as a natural cubic spline on the
interval \ccinv{a}{b}.  Let the knots be $a = t_0 < t_1 < t_2 < \ldots < t_n =
b.$
%
\prblm{4}%
Let $x_0 = 3, x_1 = -5, x_2 = 0.$  Write out the Lagrange Polynomial
$\ell_0(x)$ for these nodes; that is, write the polynomial that has value $1$
at $x_0$ and has value $0$ at $x_1,x_2.$
%
\prblm{4}%
Consider the ODE:
$$\left\{\begin{array}{l}x'(t) = f(t,x(t))\\x(a) = c\end{array}\right.$$
Write out the Euler Method to step from $x(t)$ to $x(t+h).$
%
\prblm{7}%
Compute the LU factorization of the matrix
$$\Mtx{A} = \Bracks{\begin{array}{rrr}
-1 & 1 & 0\\
1 & 1 & -1\\
-2 & 0 & 2\\
\end{array}}$$
using \naive Gaussian Elimination.  Show all your work.  Your final answer
should be two matrices, \Mtx{L} and \Mtx{U}.  Verify your answer, \ie
verify that $\Mtx{A} = \Mtx{L}\Mtx{U}.$
%
\prblm{9}%
Let $\alpha$ be given.  Determine a quadrature rule of the form
$$\int_{-\alpha}^{\alpha} f(x) \dx \approx A f(0) + B f''(-\alpha) + C f''(\alpha)$$
that is exact for polynomials of highest possible degree.
What is the highest degree polynomial for which this rule is exact?
%
\prblm{9}%
Suppose that $\phi(n)$ is some computational function that approximates a
desired quantity, $L.$  Furthermore suppose that
$$\phi(n) = L + a_1 n^{-\frac{1}{2}}
+ a_2 n^{-\frac{2}{2}}
+ a_3 n^{-\frac{3}{2}}
+ \ldots
$$
Combine two evaluations of $\phi(\cdot)$ in the manner of Richardson to get a
$\bigo{n^{-1}}$ approximation to $L.$
%
\prblm{9}%
Find the least-squares best approximant of the form $y = \ln \Parens{cx}$ to
the data
$$\begin{array}{c|c|c|c}
x_0 & x_1 & \ldots & x_n\\ 
\hline
y_0 & y_1 & \ldots & y_n
\end{array}
$$
\emph{Caution:} this should \emph{not} be done with basis vectors.
%
\prblm{9}%
Let 
$$\fclas = \setwo{f(x) = c_0x + c_1 \log x + c_2 \cos x}{c_0,c_1,c_2 \in \reals{}}.$$
Set up (\emph{but do not attempt to solve}) the normal equations to find the
least-squares best $f \in \fclas$ to approximate the data:
$$\begin{array}{c|c|c|c}
x_0 & x_1 & \ldots & x_n\\ 
\hline
y_0 & y_1 & \ldots & y_n
\end{array}
$$
The normal equations should involve the vector $\vect{c} =
\smooshvec{c_0\,c_1\,c_2}$, which uniquely
determines a function in \fclas.
%
\prblm{9}%
Consider the following approximation:
$$f''(x) \approx \frac{3f(x-h) - 4 f(x) + f(x+3h)}{6 h^2}$$
\begin{compactitem}
\item Derive this approximation using Taylor's Theorem.  
\item Assuming that $f(x)$ has bounded derivatives, give the accuracy of the
above approximation.  Your answer should be something like $\bigo{h^{?}}$.
%\item Let $f(x) = x^4.$  Approximate $f''(0)$ with this approximation, using
%$h=\oneby{2}.$
\end{compactitem}
%
\prblm{11}%
Consider the ODE:
$$\left\{\begin{array}{l}x'(t) = x(t) g(t)\\x(0) = 1\end{array}\right.$$
\begin{compactitem}
\item Use the Fundamental Theorem of Calculus to write a step update of the
form:
$$x(t+h) = x(t) + \int_t^{t+h} ??$$
\item Approximate the integral using the Trapezoid Rule to get an
\emph{explicit} step update of the form
$$x(t+h) \leftarrow Q(t,x(t),h)$$
\item Suppose that $g(t) \ge m > 0$ for all $t.$  Do you see any problem with
this stepping method if $h > 2/m$?\\
\emph{Hint:} the actual solution to the ODE is
$$x(t) = \exp{\int_0^t g(r) \dx[r]}.$$
\end{compactitem}
%
\prblm{9}%
Consider the ODE:
$$\left\{\begin{array}{l}x'(t) = \exp{x(t)} + \sin\Parens{x(t)}\\x(0) = 0\end{array}\right.$$
Write out the Taylor's series method of order three to approximate $x(t+h)$
given $x(t)$.  It is permissible to write the update as a small program like:
\begin{eqnarray*}
x'(t) &\leftarrow& Q_0(x(t))\\
x''(t) &\leftarrow& Q_1(x(t),x'(t))\\
&\vdots&\\
x(t+h) &\leftarrow& R(x(t),x'(t),x''(t),\ldots)
\end{eqnarray*}
rather than write the update as a monstrous equation of the form
$$x(t+h) \leftarrow M(x(t)).$$
%
\prblm{15}%
Consider the data:
$$\begin{array}{c||c|c|c}
k & 0 & 1 & 2 \\
\hline
\hline
x_k & -0.5 & 0 & 0.5 \\
\hline
f(x_k) & 5 & 15 & 9
\end{array}
$$
\begin{compactitem}
\item Construct the divided differences table for the data.
\item Construct the Newton form of the polynomial $p(x)$ of lowest degree that interpolates $f(x)$ at these nodes.
\item Suppose that these data were generated by the function $f(x) = 40 x^3 -
32 x^2 - 6 x + 15.$
Find a numerical upper bound on the error $\abs{p(x) - f(x)}$ over the interval
\ccinv{-0.5}{0.5}.  Your answer should be a number. 
\end{compactitem}
%
\prblm{15}%
\begin{compactitem}
\item Write out the iteration step of Newton's Method for finding a zero of the
function $f(x)$.  Your iteration should look like
$$x_{k+1} = \,??$$
\item Write the Newton's Method iteration for finding $\sqrt[3]{25/4}.$
\item Letting $x_0 = \frac{5}{2},$ find the $x_1$ and $x_2$ for your iteration.
Note that $\sqrt[3]{25/4} \approx 1.842$.
\end{compactitem}
%
%\prblm{7}%
%Write out the iteration step of the Secant Method for finding a zero of the
%function $f(x)$.  Your iteration should look like
%$$x_{k+1} = \,??$$
%
\prblm{15}%
\begin{compactitem}
\item Let $x$ be some given number, and let $f(\cdot)$ be a given, ``black
box'' function.  Using Taylor's Theorem, derive a \bigo{h} approximation to the
derivative $f'(x).$  Call your approximation $\phi(h)$; the approximation
should be defined in terms of $f(\cdot)$ evaluated at some values, and should
probably involve $x$ and $h.$
\item Let $f(x) = x^2 + 1.$  Approximate $f'(0)$ using your $\phi(h),$ for
$h=\half.$
\item For the same $f(x)$ and $h,$ get a \bigo{h^2} approximation to $f'(0)$
using the method of Richardson's Extrapolation;  your answer should probably
include a table like this:
$$\begin{array}{cc}
D(0,0) & \\
D(1,0) & D(1,1) \\
\end{array}$$
\end{compactitem}
%
\end{problems}

%UNFOLD
\section{First Midterm, Fall 2004}%FOLDUP
% \threebythree{a}{b}{c}{d}{e}{f}{g}{h}{i}: bracketed 3x3 matrix with
% right-aligned columns, entries given row by row.
\providecommand{\threebythree}[9]{\Bracks{\begin{array}{rrr}{#1}&{#2}&{#3}\\{#4}&{#5}&{#6}\\{#7}&{#8}&{#9}\end{array}}}
% \threebyone{a}{b}{c}: bracketed 3x1 column vector.
\providecommand{\threebyone}[3]{\Bracks{\begin{array}{r}{#1}\\{#2}\\{#3}\end{array}}}
% \avecUL{x}{up}{lo}: vector symbol with upper/lower decorations
% (thin wrapper around the project macro \neUL).
\providecommand{\avecUL}[3]{\ensuremath{\neUL{\vect{#1}}{#2}{#3}}\xspace}
% \thex{k}: the k-th iterate of the vector x, typeset as x^{(k)}.
\providecommand{\thex}[1]{\avecUL{x}{\wrapNeParens{#1}}{}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

{\bf [Definitions] Answer \emph{no more than two} of the following. }
\begin{problems}
\prblm{12}%%FOLDUP
Clearly state Taylor's Theorem for $f(x+h).$  Include all hypotheses, and state
the conditions on the variable that appears in the error term.
%%UNFOLD
\prblm{12}%%FOLDUP
Write the general form of the iterated solution to the problem
\(\Mtx{A}\vect{x} = \vect{b}.\)
Let \Mtx{Q} be your ``splitting matrix,'' and use the factor $\omega.$
Your solution should look something like:
\[??\thex{k} =  ?? \thex{k-1} + ??\qquad\text{or}\qquad
 \thex{k} =  ?? \thex{k-1} + ?? \]

For one of the following variants, describe what \Mtx{Q} is, in relation to
\Mtx{A} : Richardson's, Jacobi, Gauss-Seidel.

%%UNFOLD
\prblm{12}%%FOLDUP
Write the iteration for Newton's Method \emph{or} the Secant Method for
finding a root to the equation $f(x) = 0.$ Your solution should look something
like:
\[x_{k+1} =  ?? + \frac{??}{??}\]
%%UNFOLD
\end{problems}
{\bf [Problems] Answer \emph{no more than four} of the following. }
\begin{problems}
\prblm{14}%%FOLDUP
Perform bisection on the function graphed below. Let the first interval be
\ccinv{0}{256}.  Give the second, third, fourth, and fifth intervals.
\begin{figure}[htbp!]
\begin{center}
\begin{picture}(280,140)(0,-80)
\graphpoop[16]{2}(0,-64)(256,128)
\thicklines
\qbezier(0,-48)(108,-64)(256,64)
%\put(0,0){\line(1,0){256}}
\end{picture}
\end{center}
\end{figure}
%%UNFOLD
\prblm{14}%%FOLDUP
Give a \bigo{h^4} approximation to $\sin \Parens{h + \pi/2}.$  Find a
reasonable $C$ such that the truncation error is no greater than $C h^4.$
%%UNFOLD
\prblm{14}%%FOLDUP
Use Newton's Method to find a good approximation to $\sqrt[3]{24}.$  Use $x_0 =
3.$  That is, iteratively define a sequence with $x_0 = 3,$ such that $x_k \to
\sqrt[3]{24}.$
%%UNFOLD
\prblm{14}%%FOLDUP
One of the roots of $x^2 - bx + c = 0$ as found by the quadratic formula is
subject to subtractive cancellation when $\abs{b} \gg \abs{c}.$  Which root is
it? Rewrite the expression for that root to eliminate subtractive cancellation.
%%UNFOLD
\prblm{14}%%FOLDUP
Find the LU decomposition of the following matrix by Gaussian Elimination with
\naive pivoting.
\[\threebythree{3}{2}{4}{-6}{1}{-6}{12}{-12}{10}\]
%%UNFOLD
\prblm{14}%%FOLDUP
Consider the linear equation:
\[\threebythree{6}{1}{1}{2}{4}{0}{1}{2}{6} \vect{x} = \threebyone{5}{-6}{3}\]
Letting $\thex{0} = \trans{\Bracks{1\,\,1\,\,1}}$, find \thex{1} using either
the Gauss-Seidel or Jacobi iterative schemes.  Use weighting $\omega=1.$  You
must indicate which scheme you are using.  Show all your work.
%%UNFOLD
\end{problems}
{\bf [Questions] Answer \emph{only one} of the following. }
\begin{problems}
\prblm{20}%%FOLDUP
Suppose that the eigenvalues of \Mtx{A} are in \ccinv{\alpha}{\beta} with $0 <
\alpha < \beta.$  Find the $\omega$ that minimizes \norm{\Mtx{I} - \omega \Mtx{A}^2}.
What is the minimal value of \norm{\Mtx{I} - \omega \Mtx{A}^2} for this optimal
$\omega$?  For full credit you need to make some argument \emph{why} this
$\omega$ minimizes this norm.
%%UNFOLD
\prblm{20}%%FOLDUP
Let $f(x) = 0$ have a unique root $r.$
Recall that the form of the error for Newton's Method is
\[e_{k+1} = \frac{-f''(\xi_k)}{2f'(x_k)} e_k^2,\]
where $e_k = r - x_k.$  Suppose that $f(x)$ has the properties that
$f'(x) \ge m > 0$ for all $x,$ and $\abs{f''(x)} \le M$ for all $x.$
Find $\delta > 0$ such that $\abs{e_k} < \delta$ ensures that $x_k \to r.$
Justify your answer.
%%UNFOLD
\prblm{20}%%FOLDUP
Suppose that $f(r) = 0 = f'(r) \ne f''(r),$ and $f''(x)$ is continuous.
Letting $e_k = x_k - r,$ show that for Newton's Method,
\[e_{k+1} \approx \half e_k,\]
when $\abs{e_k}$ is sufficiently small.  (\emph{Hint:} Use Taylor's Theorem
twice.)  Is this faster or slower convergence than for Newton's Method with a
simple root?
%%UNFOLD
\end{problems}
%UNFOLD
\section{Second Midterm, Fall 2004}%FOLDUP
{\bf [Definitions] Answer \emph{no more than two} of the following three. }
\begin{problems}
\prblm{10}%%FOLDUP
For a set of distinct nodes $x_0,x_1,\ldots, x_{13},$ what is the Lagrange
Polynomial $\ell_5(x)$?  You may write your answer as a product.  What degree
is this polynomial?
%%UNFOLD
\prblm{10}%%FOLDUP
Complete this statement of the polynomial interpolation error theorem:
{\em
Let $p$ be the polynomial of degree at most $n$ interpolating function $f$ at
the $n+1$ distinct nodes $x_0,x_1,\ldots,x_n$ on \ccinv{a}{b}.  Let $f^{(n+1)}$
be continuous.  Then for each $x \in \ccinv{a}{b}$ there is some
$\xi\in\ccinv{a}{b}$ such that
\[
f(x) - p(x) = \frac{?}{?} \prod_{i=0}^n \Parens{?}.
\]
}
%%UNFOLD
\prblm{10}%%FOLDUP
State the conditions which define the function $S(x)$ as a natural cubic spline
on the interval \ccinv{a}{b}.
%%UNFOLD
\end{problems}
{\bf [Problems] Answer \emph{no more than five} of the following six. }
\begin{problems}
\prblm{16}%%FOLDUP
How large must $n$ be to interpolate the function $\exp{x}$ to within $0.001$
on the interval \ccinv{0}{2} with the polynomial interpolant at $n$ equally
spaced nodes?
%%UNFOLD
\prblm{16}%%FOLDUP
Let $\phi(h)$ be some computable approximation to the quantity $L$ such that
\[\phi(h) = L + a_5 h^5 + a_{10} h^{10} + a_{15} h^{15} + \ldots\]
Combine evaluations of $\phi(\cdot)$ to devise a \bigo{h^{10}} approximation to
$L$.
%%UNFOLD
\prblm{16}%%FOLDUP
Derive the constants $A$ and $B$ which make the quadrature rule
\[\int_{-1}^{1} f(x) \dx \approx A f\Parens{-\frac{\sqrt{15}}{5}} + B f(0)
+ A f\Parens{\frac{\sqrt{15}}{5}}\]
exact for polynomials of degree $\le 2.$
Use this quadrature rule to approximate
\[\pi = \int_{-1}^{1} \frac{2}{1+x^2}\dx\]
%%UNFOLD
\prblm{16}%%FOLDUP
Find the polynomial of degree $\le 3$ interpolating the data
	\[
	\begin{array}{c|*{4}{|c}}
	x & -1 & 0 & 1 & 2\\
	\hline
	y & -2 & 8 & 0 & 4\\
	\end{array}
	\]
(\emph{Hint:} using Lagrange Polynomials will probably take too long.)
%%UNFOLD
\prblm{16}%%FOLDUP
Find constants $\alpha, \beta$ such that the following is a
quadratic spline on \ccinv{0}{4}.
\[Q(x) = \begin{cases} 
 x^2 - 3x + 4 &: 0 \le x < 1\\
 \alpha \wrapparens{x-1}^2 + \beta \wrapparens{x-1} + 2 &: 1 \le x < 3\\
 x^2 + x - 4 &: 3 \le x \le 4\end{cases}\]
%%UNFOLD
\prblm{16}%%FOLDUP
Consider the following approximation:
\[f'(x) \approx \frac{f(x+h) - 5 f(x) + 4 f(x+\half[h])}{3h}\]
\begin{compactitem}
\item Derive this approximation via Taylor's Theorem.  
\item Assuming that $f(x)$ has bounded derivatives, give the accuracy of the
above approximation.  Your answer should be something like $\bigo{h^{?}}$.
\item Let $f(x) = x^2.$  Approximate $f'(0)$ with this approximation, using
$h=\oneby{3}.$
\end{compactitem}
%%UNFOLD
\end{problems}
%UNFOLD
\section{Final Exam, Fall 2004}%FOLDUP
{\bf [Definitions] Answer \emph{all} of the following.}
\begin{problems}
\prblm{10}%%FOLDUP
Clearly state Taylor's Theorem for $f(x+h).$  Include all hypotheses, and state
the conditions on the variable that appears in the error term.
%UNFOLD
\prblm{10}%%FOLDUP
Write the iteration for Newton's Method \emph{or} the Secant Method for
finding a root to the equation $f(x) = 0.$ Your solution should look something
like:
\[x_{k+1} =  ?? + \frac{??}{??}\]
%%UNFOLD
\prblm{10}%%FOLDUP
Write the general form of the iterated solution to the problem
\(\Mtx{A}\vect{x} = \vect{b}.\)
Let \Mtx{Q} be your ``splitting matrix,'' and use the factor $\omega.$
Your solution should look something like:
\[??\thex{k} =  ?? \thex{k-1} + ??\qquad\text{or}\qquad
 \thex{k} =  ?? \thex{k-1} + ?? \]

For one of the following variants, describe what \Mtx{Q} is, in relation to
\Mtx{A} : Richardson's, Jacobi, Gauss-Seidel.
%%UNFOLD
\prblm{10}%%FOLDUP
Define what it means for the function $f(x)\in\fclas$ to be the least squares 
best approximant to the data
\[
\begin{array}{*{3}{c|}c}
x_0 & x_1 & \ldots & x_n\\
\hline
y_0 & y_1 & \ldots & y_n
\end{array}
\]
%%UNFOLD
\end{problems}
{\bf [Problems] Answer \emph{all} of the following. }
\begin{problems}
\prblm{10}%%FOLDUP
Rewrite this system of higher order ODEs as a system of first order ODEs:
\[\begin{cases}  
x''(t) = x'(t)\Bracks{x(t) + ty(t)}\\
y''(t) = y'(t)\Bracks{y(t) - tx(t)}\\
x'(0) = 1\\
x(0) = 4\\
y'(0) = -3\\
y(0) = -2
\end{cases}\]
%%UNFOLD
\prblm{10}%%FOLDUP
Consider the ODE:
\[\begin{cases}  x'(t) = t\Parens{x + 1}^2 &\\
x(2) = 1/2\end{cases}\]
Use Euler's Method to find an approximate value of $x(2.1)$ using a single
step.
%%UNFOLD
\prblm{10}%%FOLDUP
Let $\phi(h)$ be some computable approximation to the quantity $L$ such that
\[\phi(h) = L + a_4 h^4 + a_{8} h^{8} + a_{12} h^{12} + \ldots\]
Combine evaluations of $\phi(\cdot)$ to devise a \bigo{h^{8}} approximation to
$L$.
%%UNFOLD
\prblm{15}%%FOLDUP
Consider the approximation
\[f'(x) \approx \frac{9f(x+h) - 8 f(x) - f(x-3h)}{12h}\]
\begin{compactenum}[(a)]
\item Derive this approximation using Taylor's Theorem.
\item Assuming that $f(x)$ has bounded derivatives, give the accuracy of the
above approximation.  Your answer should be something like $\bigo{h^{?}}$.
\end{compactenum}
%%UNFOLD
\prblm{15}%%FOLDUP
Find constants, $\alpha,\beta,\gamma$ such that the following is a
natural cubic spline on \ccinv{1}{3}.
\[Q(x) = \begin{cases} 
 \alpha \Parens{x-1}^3 + \beta \Parens{x-1}^2 + 12\Parens{x-1} + 1  &: 1 \le x < 2\\
 \gamma x^3 + 18x^2 -30x + 19 &: 2 \le x < 3
 \end{cases}\]
%alpha = 2, beta = 0, gamma = -2
%%UNFOLD
\prblm{15}%%FOLDUP
Devise a subroutine that, given an input number $z,$ calculates an approximation
to $\sqrt[3]{z}$ via Newton's Method.  Write your subroutine in pseudo code,
Matlab, or C/C++.  Include reasonable termination conditions so your subroutine
will terminate if it finds a good approximate solution.
Your subroutine should use only the basic arithmetic operations of addition,
subtraction, multiplication, division; in particular your subroutine should not
have access to a cube root or logarithm operator.
%%UNFOLD
\prblm{15}%%FOLDUP
Consider the ODE:
\[\begin{cases}  x'(t) = \cos\Parens{x(t)} + \sin\Parens{x(t)} &\\
x(0) = 1\end{cases}\]
Use Taylor's theorem to devise a \bigo{h^3} approximant to $x(t+h)$ which is a
function of $x,\,t,$ and $h$ only.  
You may write your answer as a program:
\begin{align*}
x'(t) &\leftarrow Q_0(x(t))\\
x''(t) &\leftarrow Q_1(x(t),x'(t))\\
&\vdots\\
\tilde{x} &\leftarrow R(x(t),x'(t),x''(t),\ldots)
\end{align*}
such that
$x(t+h) = \tilde{x} + \bigo{h^3}.$
%%UNFOLD
\end{problems}
{\bf [Questions] Answer \emph{all} of the following. }
\begin{problems}
\prblm{20}%%FOLDUP
Consider the ``quadrature'' rule:
\[\int_0^1 f(x) \dx \approx A_0 f(0) + A_1 f(1) + A_2 f'(1) + A_3 f''(1)\]
\begin{compactenum}[(a)]
\item Write four linear equations involving $A_i$ to make this rule exact for
polynomials of the highest possible degree.
\item Find the constants $A_i$ using Gaussian Elimination with \naive pivoting.
\end{compactenum}
%%UNFOLD
\prblm{20}%%FOLDUP
Consider the data:
$$\begin{array}{*{3}{c|}c}
k & 0 & 1 & 2 \\
\hline
\hline
x_k & 0 & 0.25 & 0.5\\
\hline
f(x_k) & 1.5 & 1 & 0.5\\
\end{array}
$$
\begin{compactenum}[(a)]
\item Construct the divided differences table for the data.
\item Construct the Newton form of the polynomial $p(x)$ of lowest degree that interpolates $f(x)$ at these nodes.
\item What degree is your polynomial? Is this strange?
\item Suppose that these data were generated by the function 
\[f(x) = 1 + \half[\cos{2\pi x}].\]
%$f(x) = \cos^2\Parens{\pi x}$.

Find an upper bound on the error $\abs{p(x) - f(x)}$ over the interval
\ccinv{0}{0.5}.  Your answer should be a number. 
\end{compactenum}
%%UNFOLD
\prblm{20}%%FOLDUP
Let 
\[\fclas = \setwo{f(x) = c_0\sqrt{x} + c_1 \ell(x) + c_2 \exp x}{c_0,c_1,c_2 \in
\reals{}},\]
where $\ell(x)$ is some black box function.
We are concerned with finding the least-squares best $f\in\fclas$ to
approximate the data
\[\begin{array}{*{3}{c|}c}
x_0 & x_1 & \ldots & x_n\\ 
\hline
y_0 & y_1 & \ldots & y_n
\end{array}
\]
\begin{compactenum}[(a)]
\item Set up (\emph{but do not attempt to solve}) the normal equations to find
the vector $\vect{c} = \smooshvec{c_0\,c_1\,c_2}$, which determines the least
squares approximant in \fclas.
\item Now suppose that the function $\ell(x)$ happens to be the Lagrange
Polynomial associated with $x_7,$ among the values $x_0,x_1,\ldots,x_n.$  That
is, $\ell(x_j) = \delta_{j7}.$
Prove that $f(x_7) = y_7,$ where $f$ is the least squares best
approximant to the data.
\end{compactenum}
%%UNFOLD
\prblm{10}%%FOLDUP
State some substantive question which you thought might appear on this exam,
but did not.  Answer this question (correctly).
%%UNFOLD
\end{problems}
%UNFOLD
%for vim modeline: (do not edit)
% vim:ts=2:sw=2:tw=79:fdm=marker:fmr=FOLDUP,UNFOLD:cms=%%s:tags=tags;:syntax=tex:filetype=tex:ai:si:cin:nu:fo=croqt:cino=p0t0c5(0:
