\documentclass[twoside,a4paper]{article}
\usepackage{geometry}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

% useful packages.
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{enumerate}
\usepackage{graphicx}
\usepackage{multicol}
\usepackage{fancyhdr}
\usepackage{layout}
\usepackage{tikz}
\usepackage{caption}

% some common command
\newcommand{\dif}{\mathrm{d}}
\newcommand{\avg}[1]{\left\langle #1 \right\rangle}
\newcommand{\difFrac}[2]{\frac{\dif #1}{\dif #2}}
\newcommand{\pdfFrac}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\OFL}{\mathrm{OFL}}
\newcommand{\UFL}{\mathrm{UFL}}
\newcommand{\fl}{\mathrm{fl}}
\newcommand{\op}{\odot}
\newcommand{\Eabs}{E_{\mathrm{abs}}}
\newcommand{\Erel}{E_{\mathrm{rel}}}

\begin{document}

\pagestyle{fancy}
\fancyhead{}
\lhead{NAME JiatuYan}
\chead{Numerical PDE/ODE homework \#4}
\rhead{Date 2021.5.22}


\section*{I. Exercise 9.8 Prove Theorem 9.7.}
From Definition 9.3 and Lemma 9.4, we have
\[
\left\{
	\begin{array}{c}
		Ae=r\\
		f=Au\\
	\end{array}
	\right.
.\] 
Left-multiplying both sides of the equations by $A^{-1}$, we have
\[
\left\{
	\begin{array}{c}
		A^{-1}r=e\\
		A^{-1}f=u\\
	\end{array}
	\right.
.\] 
Taking 2-norms on both sides of the four equations, we can deduce the two inequalities listed below.
\begin{equation*}
	\begin{split}
		&\left\{
			\begin{array}{l}
				\lVert r \rVert_2= \lVert Ae\rVert_2\le  \lVert A \rVert _2 \lVert e \rVert _2\\ 
				\lVert u\rVert_2= \lVert A^{-1}f \rVert_2
				\le \lVert A^{-1} \rVert_2 \lVert f \rVert_2\\
			\end{array}
		\right.
		\Rightarrow \frac{1}{ \lVert A \rVert_2 \lVert A^{-1} \rVert_2  }\frac{ \lVert r \rVert _2}{ \lVert f \rVert _2}
		=\frac{1}{\mathrm{cond}\left( A \right) }\frac{ \lVert r \rVert_2}{ \lVert f \rVert _2}
		\le \frac{ \lVert e \rVert _2}{ \lVert u \rVert _2}\\
		&\left\{
			\begin{array}{l}
				\lVert f \rVert _2= \lVert Au \rVert _2
				\le  \lVert A \rVert _2 \lVert u  \rVert_2\\
				\lVert e \rVert_2= \lVert A^{-1}r \rVert_2
				\le  \lVert A^{-1} \rVert _2 \lVert r \rVert _2\\
			\end{array}
			\right.
			\Rightarrow \frac{ \lVert e \rVert_2}{ \lVert u \rVert_2}
			\le  \lVert A \rVert_2 \lVert A^{-1} \rVert _2 \frac{ \lVert r \rVert _2}{ \lVert f \rVert _2}
			=\mathrm{cond}\left( A \right) \frac{ \lVert r \rVert_2}{ \lVert f \rVert_2}.\\ 
	\end{split}
\end{equation*}
The two implications hold because the 2-norms of $f$ and $u$ are nonzero.
Merging the two inequalities, we obtain the inequality in Theorem 9.7.
\section*{II. Exercise 9.8 Compute by hand the values of $\mathrm{cond}\left( A \right) $ for $n=8$ and  $n=1024$.}
Since A is a symmetric matrix, we have 
\[\mathrm{cond}\left( A \right)= \lVert A \rVert_2 \lVert A^{-1} \rVert _2=  
	\rho\left( A \right)\rho\left( A^{-1} \right)
	=\frac{\max_{\lambda\in\lambda\left( A \right) }\mid \lambda \mid}
	{\min_{\lambda\in\lambda\left( A \right) }\mid \lambda \mid}
.\]
In Lemma 8.31 we know the eigenvalues of matrix A are 
\[
	\lambda_k\left( A \right)=\frac{4}{h^2}\sin^2\left( \frac{k\pi}{2n} \right),\quad k=1,\ldots,n-1,\quad h=\frac{1}{n} 
.\]
Because of the monotonicity and nonnegativity of the sine function in $\left( 0, \pi /2 \right) $,
we can easily calculate $\mathrm{cond}\left( A \right) $ by hand when $n=8$ and  $n=1024$.
\begin{equation*}
	\begin{split}
		&n=8,\quad\mathrm{cond}\left( A \right)=\frac{4n^2\sin^2\left( \frac{7\pi}{16} \right)}{4n^2\sin^2\left( \frac{\pi}{16} \right)}
		=\frac{\sin^2\left( \frac{7\pi}{16} \right)}{\sin^2\left( \frac{\pi}{16} \right)}
		\approx 25.2741,\\
		&n=1024,\quad\mathrm{cond}\left( A \right)=\frac{\sin^2\left( \frac{1023\pi}{2048} \right)}{\sin^2\left( \frac{\pi}{2048} \right)}\approx 4.2497\times 10^{5}.\\   
	\end{split}
\end{equation*}

\section*{III. Exercise 9.12.}
Without loss of generality, we set $n=4$.

If the values of the Fourier modes are 0 at $\partial \Omega^{h}$, the modes are drawn in Figure \ref{fig1}.
We can see that the values at all nodes are 0 when $k=4$, which means the Fourier mode with $k=4$ cannot be represented on the grid, while the other modes can be represented on the grid.
Thus the range of wavenumbers is $\left[1,n\right)$.
\begin{figure}[ht]
	\centering
\begin{tikzpicture}
	\draw (0,0)--(2*6.28,0);
	\draw (0,0)--(0,0.25);
	\draw[dashed] (2*1.5708,-1.2)--(2*1.5708,1.1);
	\draw[dashed] (2*3.14,-1.2)--(2*3.14,1.1);
	\draw[dashed] (2*4.712,-1.2)--(2*4.712,1.1);
	\draw (2*6.28,0)--(2*6.28,0.25);
	\draw[blue, smooth, domain=0:rad(720)] plot (\x, {sin(\x*1/4 r)} );
	\draw[blue] (0, 0-3)--(3.14, 0.707-3);
	\draw[blue] (3.14, 0.707-3)--(6.28, 1-3);
	\draw[blue] (6.28, 1-3)--(9.42, 0.707-3);
	\draw[blue] (9.42, 0.707-3)--(12.56, 0-3);
	\node[blue, above] at (6.2831,1) {k=1};
	\node[blue, above] at (6.2831,1-3) {k=1};
	\node[red, below] at (9.4247,-1) {k=2};
	\node[red, below] at (9.4247,-1-3) {k=2};
	\draw[red, smooth, domain=0:rad(720)] plot (\x, {sin(\x*2/4 r)});
	\draw[red] (0, 0-3)--(3.14, 1-3);
	\draw[red] (3.14, 1-3)--(6.28, 0-3);
	\draw[red] (6.28, 0-3)--(9.42, -1-3);
	\draw[red] (9.42, -1-3)--(12.56, 0-3);
	\node[green, below] at (6.28,-1) {k=3};
	\node[green, below] at (6.28,-1-3) {k=3};
	\draw[green, smooth, domain=0:rad(720)] plot (\x, {sin(\x*3/4 r)});
	\draw[green] (0, 0-3)--(3.14, 0.707-3);
	\draw[green] (3.14, 0.707-3)--(6.28, -1-3);
	\draw[green] (6.28, -1-3)--(9.42, 0.707-3);
	\draw[green] (9.42, 0.707-3)--(12.56, 0-3);
	\node[brown, below] at (4.712,-1) {k=4};
	\node[brown, below] at (2,-3) {k=4};
	\draw[brown, smooth, domain=0:rad(720)] plot (\x, {sin(\x*4/4 r)});
	\draw[brown] (0,0-3)--(12.56, 0-3);
	\draw[dashed] (2*1.5708,-1.2-3)--(2*1.5708,1.1-3);
	\draw[dashed] (2*3.14,-1.2-3)--(2*3.14,1.1-3);
	\draw[dashed] (2*4.712,-1.2-3)--(2*4.712,1.1-3);
\end{tikzpicture}
\caption{The figures of wave modes vanishing at $\partial\Omega^{h}$.}
        \label{fig1}
\end{figure}

If the values of the Fourier modes are nonzero at $\partial\Omega^{h}$,
we assume the value at $x=0$ is 0.5 without loss of generality.
The figures are shown in Figure \ref{fig2}, where we can see that the Fourier mode with $k=4$ can be represented on the grid.
If a Fourier mode vanished at all the interior nodes but not at the boundary nodes, it would have to be a combination of several
Fourier modes, because its wavelength would have to change.
Thus, if we do not require the Fourier modes to vanish at the domain boundary, the range of wavenumbers is $\left[1, n\right]$.
\begin{figure}[ht]
        \centering
\begin{tikzpicture}
        \draw (0,0)--(2*6.28,0);
        \draw (0,0)--(0,0.25);
	\draw (0, -3)--(2*6.28, -3);
        \draw[dashed] (2*1.5708,-1.2)--(2*1.5708,1.1);
        \draw[dashed] (2*3.14,-1.2)--(2*3.14,1.1);
        \draw[dashed] (2*4.712,-1.2)--(2*4.712,1.1);
        \draw (2*6.28,0)--(2*6.28,0.25);
	\draw[blue, smooth, domain=0:rad(720)] plot (\x, {sin((rad(30)+\x*1/4) r)} );
        \draw[blue] (0, 0.5-3)--(3.14, 0.9659-3);
        \draw[blue] (3.14, 0.9659-3)--(6.28, 0.866-3);
        \draw[blue] (6.28, 0.866-3)--(9.42, 0.259-3);
        \draw[blue] (9.42, 0.259-3)--(12.56, -0.5-3);
	\node[blue, above] at (6.2831-2.09,1) {k=1};
        \node[blue, above] at (6.2831,0.866-3) {k=1};
        \node[red, below] at (9.4247-1.05,-1) {k=2};
        \node[red, below] at (9.4247,-0.866-3) {k=2};
	\draw[red, smooth, domain=0:rad(720)] plot (\x, {sin((\x*2/4+rad(30)) r)});
        \draw[red] (0, 0.5-3)--(3.14, 0.866-3);
        \draw[red] (3.14, 0.866-3)--(6.28, -0.5-3);
        \draw[red] (6.28, -0.5-3)--(9.42, -0.866-3);
        \draw[red] (9.42, -0.866-3)--(12.56, 0.5-3);
        \node[green, below] at (6.28-0.698,-1) {k=3};
        \node[green, below] at (6.28,-0.866-3) {k=3};
	\draw[green, smooth, domain=0:rad(720)] plot (\x, {sin((\x*3/4+rad(30)) r)});
        \draw[green] (0, 0.5-3)--(3.14, 0.259-3);
        \draw[green] (3.14, 0.259-3)--(6.28, -0.866-3);
        \draw[green] (6.28, -0.866-3)--(9.42, 0.966-3);
        \draw[green] (9.42, 0.966-3)--(12.56, -0.5-3);
        \node[brown, below] at (4.712-0.524,-1) {k=4};
        \node[brown, below] at (4.12-0.524,-0.5-3) {k=4};
	\draw[brown, smooth, domain=0:rad(720)] plot (\x, {sin((\x*4/4+rad(30)) r)});
        \draw[brown] (0, 0.5-3)--(3.14, -0.5-3);
        \draw[brown] (3.14, -0.5-3)--(6.28, 0.5-3);
        \draw[brown] (6.28, 0.5-3)--(9.42, -0.5-3);
        \draw[brown] (9.42, -0.5-3)--(12.56, 0.5-3);
        \draw[dashed] (2*1.5708,-1.2-3)--(2*1.5708,1.1-3);
        \draw[dashed] (2*3.14,-1.2-3)--(2*3.14,1.1-3);
        \draw[dashed] (2*4.712,-1.2-3)--(2*4.712,1.1-3);
\end{tikzpicture}
\caption{The figures of Fourier modes nonvanishing at $\partial\Omega^{h}$.}
        \label{fig2}
\end{figure}

\section*{IV. Exercise 9.15 Plot the case of $n=6$ for Example 9.14.}
From Figure \ref{fig3}, we can clearly see that the values of $\sin\left( x_j\frac{k}{2}\pi \right) $
and $-\sin\left( x_j \frac{3k}{2}\pi \right) $ at each node $x_j$ are the same. This means the mode with $k=3n /2$ is 
represented by $k=n /2$.
\begin{figure}[ht]
	\centering
	\begin{tikzpicture}
		\draw(0,0)--(9.425,0);
		\draw(0,0)--(0,0.25);
		\draw(9.425, 0)--(9.425, 0.25);
		\draw[dashed] (1.571,-1.2)--(1.571, 1.1);
		\draw[dashed] (2*1.571,-1.2)--(2*1.571, 1.1);
		\draw[dashed] (3*1.571,-1.2)--(3*1.571, 1.1);
		\draw[dashed] (4*1.571,-1.2)--(4*1.571, 1.1);
		\draw[dashed] (5*1.571,-1.2)--(5*1.571, 1.1);
		\draw[blue, smooth, domain=0:rad(540)] plot (\x, {sin((\x) r)});
		\draw[red, smooth, domain=0:rad(180)] plot (\x, {-sin((3*\x) r)});
		\draw[red, smooth, domain=rad(180):rad(360)] plot (\x, {-sin((3*\x) r)});
		\draw[red, smooth, domain=rad(360):rad(540)] plot (\x, {-sin((3*\x) r)});
		\draw[blue](10, 0.8)--(10.5, 0.8);
		\node[blue, right] at (10.5, 0.8) {$\sin(x_j\frac{k}{2}\pi)$};
		\draw[red](10, 0.4)--(10.5, 0.4);
		\node[red, right] at (10.5, 0.4) {$-\sin(x_j\frac{3k}{2}\pi)$};
	\end{tikzpicture}
	\caption{The figure for situation that $n=6$ in Example 9.14}
        \label{fig3}
\end{figure}

\section*{V. Exercise 9.19 Prove Lemma 9.18.}
\subsection*{\small{1. Prove  $e^{\left( \ell \right) }=T^{\ell}e^{\left(0  \right) }$.}}
When $\ell=0$, it is obvious that $T^{0}e^{\left(0  \right) }=I_{n-1}e^{\left(0  \right) }=e^{\left(0  \right) }$.

We hypothesize that when $\ell=k$, we have $e^{\left( k \right) }=T^{k}e^{\left( 0 \right) }$.
Then for $\ell=k+1$, we assume the exact solution is $u$, then we have
\begin{equation*}
	\begin{split}
		e^{\left( k+1 \right) }&=u-u^{\left( k+1 \right) }\\
		       &=Tu+c-\left( Tu^{\left( k \right) }+c \right)\\
		       &=T\left( u-u^{\left( k \right) } \right)\\
		       &=Te^{\left( k \right) }\\
		       &=T^{k+1}e^{\left( 0 \right) }.\\
	\end{split}
\end{equation*}
Thus we have proved $e^{\left( \ell \right) }=T^{\ell}e^{\left(0  \right) }$ by induction.

\subsection*{\small{2. Prove that the iteration converges to 0 iff the spectral radius $\rho\left(T  \right)<1 $.}}
We assume the eigenvalues of $T$ are $\lambda_1,\ldots,\lambda_{n-1}$ and that $T$ is diagonalizable, so there exists a nonsingular matrix $P$ that satisfies
$T=P^{-1}\Lambda P$, where $\Lambda=\mathrm{diag}\{\lambda_1,\ldots,\lambda_{n-1}\}$.
Thus we have $T^{\ell}=P^{-1}\Lambda^{\ell}P$.

For sufficiency,
we denote that $b=Pe^{\left( 0 \right) }=\left( b_i \right)_{\left( n-1 \right) \times1} $ and we will have
\[
	\Lambda^{\ell}Pe^{\left( 0 \right) }=\left[\lambda_1^{\ell}b_1,\ldots,\lambda_{n-1}^{\ell}b_{n-1}\right]^{T}
.\] 
Due to $\rho\left( T \right)<1 $, we have $ \mid \lambda_i \mid <1$ for all $i\in\{1,\ldots,n-1\}$. Thus we have 
$\lim_{\ell\to \infty}\Lambda^{\ell}b=0$, which means
 \[
	 \lim_{\ell\to \infty}T^{\ell}e^{\left( 0 \right) }=P^{-1}\left(\lim_{\ell\to \infty}\Lambda^{\ell}b\right)=P^{-1}0=0.
\] 
The limit can be moved inside because the linear map defined by the matrix $P^{-1}$ acts on a finite-dimensional space and is therefore continuous. So the iteration converges to 0.

For necessity, if the iteration converges to 0, we have $\lim_{\ell\to \infty}T^{\ell}e^{\left( 0 \right) }=0$ for all 
$e^{\left( 0 \right) }\in\mathbb{R}^{n-1}$.
If $\rho\left( T \right)\ge 1 $, then there exists a nonzero eigenvector of $T$ and its corresponding eigenvalue,
denoted as $v_i$ and $\lambda_i$, satisfying $Tv_i=\lambda_iv_i$ and 
$ \mid \lambda_i \mid\ge 1 $.
Then we set $e^{\left( 0 \right) }=v_i$ and we will have  
\[
	\lim_{\ell\to \infty}T^{\ell}v_i=\lim_{\ell\to \infty}T^{\ell-1}\lambda_iv_i=\lim_{\ell\to \infty}\lambda_i^{\ell}v_i\neq 0
.\] 
This contradicts with the hypothesis that the iteration is convergent. Thus we have $\rho\left( T \right)<1 $.

Combining sufficiency and necessity, the Lemma 9.18 is proved.

\section*{VI. Exercise 9.24 Prove Lemma 9.23.}
We denote $T_{\mathit{w}}=\left[t_1, \ldots,t_{n-1}\right]^{T}$, where $t_i$'s are vectors with length  $n-1$.
Assume the eigenvectors of $T_{\mathit{w}}$ are $v_k$, $k\in\{1,\ldots,n-1\}$. 
$v_k$'s are the same with the $w_k$'s in Lemma 8.31, respectively.
$\forall k\in\{1,2,\ldots,n-1\}$, we denote $v_{k,0}=\sin\left( 0k\pi /n \right)=0 $ and $v_{k,n}=\sin\left( nk\pi /n \right)=0 $ for the sake of convenience.
Then $\forall i\in\{1,\ldots,n-1\}$ we have
\begin{equation*}
	\begin{split}
		t_i^{T}v_k&=\frac{\mathit{w}}{2}\sin\left( \frac{\left( i-1 \right)k\pi}{n}  \right)
		+\left( 1-\mathit{w} \right)\sin\left( \frac{ik\pi}{n} \right) 
		+\frac{\mathit{w}}{2}\sin\left( \frac{\left( i+1 \right)k\pi }{n} \right)\\
				   &=\mathit{w}\sin\left( \frac{ik\pi}{n} \right)\cos\left( \frac{k\pi}{n} \right)
				   -\mathit{w}\sin\left( \frac{ik\pi}{n} \right)+\sin\left( \frac{ik\pi}{n} \right)\\
				   &=\mathit{w}\sin\left( \frac{ik\pi}{n} \right) 
				   \left[1-2\sin^2\left( \frac{k\pi}{2n} \right)-1  \right]+\sin\left( \frac{ik\pi}{n} \right)\\
				   &=\left[  1-2\mathit{w}\sin^2\left( \frac{k\pi}{2n} \right)  \right]\sin\left( \frac{ik\pi}{n} \right),
	\end{split}
\end{equation*}
where the second equation is achieved by applying the addition formula of sine to the first and last terms
and the third one is achieved by the formula $\cos\left( 2x \right)=1-2\sin^2\left( x \right)  $.
Then we have $T_{\mathit{w}}v_k=\lambda_k\left( T_{\mathit{w}} \right) v_k$, which means we have proved Lemma 9.23.

\section*{VII. Exercise  9.25.}
The Fig.2.7 in the book by Briggs et al. [2000] is reproduced in Figure \ref{fig4}.

For $n=64$,  $\mathit{w}\in\left[0, 1\right]$, we have 
\[\rho\left( T_\mathit{w} \right)=\max_{\lambda_k\in\lambda\left( T_\mathit{w} \right) } \mid \lambda_k \mid
=\max_{k=1,\ldots,n-1} \mid 1-2\mathit{w}\sin^2\left( \frac{k\pi}{2n} \right) \mid   . \]
Because $\lambda_1\left( T_\mathit{w} \right)=1-2\mathit{w}\sin^2\left( \frac{\pi}{2n} \right) $ dominates $\rho\left( T_\mathit{w} \right) $ for $\mathit{w}\in\left[0,1\right]$ and decreases monotonically as $\mathit{w}$ increases, the minimum of $\rho\left( T_\mathit{w} \right) $ is reached at $\mathit{w}=1$.
And due to the monotonically increasing property of sine function in $\left[0,\frac{\pi}{2}\right]$ and the range of its image,
the minimum of $\rho \left(T_\mathit{w}  \right)$ is reached at $k=1$ or $k=n-1$.
Then we can calculate the lower bound of $\rho\left(T_\mathit{w}  \right) $ by
\begin{equation*}
	\begin{split}
		\rho\left(T_\mathrm{w}\right)\ge \rho\left( T_1 \right) &=\max\{ \mid \lambda_1 \mid , \mid \lambda_{n-1} \mid \}\\
					       &=\max\{ \mid 1-2\sin^2\left( \frac{\pi}{128} \right) \mid ,
					       \mid 1-2\sin^2\left( \frac{63\pi}{128} \right) \mid   \}\\
					       &\ge \max\{0.998795, 0.998795\}\\
					       &\ge 0.9986.
	\end{split}
\end{equation*}
The third inequality above is obtained by rounding the exact values down.
We find $\rho\left( T_{\mathit{w}} \right)\ge 0.9986 $, which means the iteration with $T_{\mathit{w}}$ will converge to zero very slowly.

\begin{figure}[ht]
	\centering
	\includegraphics[width=13cm, height = 10cm]{Exercise9_25.eps}
	\caption{Eigenvalues of the iteration matrix $T_{\mathit{w}}$ for $\mathit{w}=\frac{1}{3}$, $\frac{1}{2}$, $\frac{2}{3}$, 1.}
        \label{fig4}
\end{figure}

\section*{VIII. Exercise 9.28. Reproduce Figure 2.8 in the book by Briggs et al. [2000].}
The figure is reproduced in Figure \ref{fig5}. From the figure we can see that regular Jacobi is good for damping the modes $16\le k\le 48$, while for weighted Jacobi with $\mathit{w}=\frac{2}{3}$, the modes $16\le k\le 64$ are all damped out quickly.

\begin{figure}[ht]
	\centering
	\begin{minipage}[t]{0.45\linewidth}
                \centering
		\includegraphics[width=8cm, height = 6cm]{Exercise9_28_1.eps}
		\end{minipage}
	\begin{minipage}[t]{0.45\linewidth}
                \centering
		\includegraphics[width=8cm, height=6cm]{Exercise9_28_2.eps}
	\end{minipage}
	\caption{Weighted Jacobi method with $\mathit{w}=1$ and $\mathit{w}=\frac{2}{3}$ applied to the
        one-dimensional model problem with n=64 points.}
        \label{fig5}
\end{figure}
\section*{IX. Exercise 9.41 Show that the computational cost of an FMG cycle is less than $\frac{2}{\left(1-2^{-D}  \right)^2 }$ WU.}
We assume the dimension is D and $n=2^{m}$.
Let WU denote the computational cost of performing one relaxation sweep on the finest grid,
$C_m$ denote the computational cost of a single V-cycle in a $D$-dimensional domain with $n=2^{m}$
and $C$ denote the computational cost of an FMG cycle.
As was done in Lemma 9.39, we neglect the cost of the intergrid transfers.
Then for the case $\nu_1=\nu_2=1$ we have
\begin{equation*}
	\begin{split}
		\mathrm{cost}_1&=C_1=2WU2^{-D\left( m-1 \right) },\\
		\mathrm{cost}_2&=C_2=2WU2^{-D\left( m-1 \right) }\left(1+2^{D}  \right) ,\\
			       &\ldots\\
		\mathrm{cost}_m&=C_m=2WU2^{-D\left(m-1  \right) }\left( 1+2^{D}+\ldots+2^{D\left( m-1 \right) } \right) ,
	\end{split}
\end{equation*}
where $\mathrm{cost}_i$'s are the computational costs of the V-cycle started from the grid with $n=2^{i}$ in full multigrid V-cycle.
We get the results by adding up these costs.
\[
	C=\sum_{i=1}^{m}\mathrm{cost}_i=2WU\left[ \sum_{i=1}^{m}\left(i2^{-D\left( i-1 \right) }   \right)  \right] 
.\]
We can calculate its upper bound roughly by
\begin{equation*}
	\begin{split}
		&C<S=2WU\left[\sum_{i=1}^{\infty}\left(i 2^{-D\left( i-1 \right) }\right)   \right],\\
		&S-2^{-D}S=2WU\left[1+\sum_{i=1}^{\infty}\left( 2^{-Di } \right) \right]=\frac{2}{1-2^{-D}}WU,\\
		&\Rightarrow C<S=\frac{2}{1-2^{-D}}\frac{1}{1-2^{-D}}WU=\frac{2}{\left( 1-2^{-D} \right)^2 }WU.
	\end{split}
\end{equation*} 
Thus we have that the computational cost of an FMG cycle is less than $\frac{2}{\left( 1-2^{-D} \right)^2 }WU$.
For $D=1,2,3$, we can calculate the upper bounds more precisely. They are $C_{D=1}<8WU$, $C_{D=2}<\frac{32}{9}WU$ and $C_{D=3}<\frac{128}{49}WU$. 

\section*{X. Exercise 9.47}
By Theorem 9.46 and Lemma 9.23, we have 
\begin{equation*}
	\begin{split}
		TG
		\left[
			\begin{array}{c}
				w_k\\
				w_{k'}\\
			\end{array}
		\right]
		&=
		\left[
			\begin{array}{c c}
				\lambda_k^{\nu_1+\nu_2}s_k&\lambda_k^{\nu_1}\lambda_{k'}^{\nu_2}s_k\\
				\lambda_{k'}^{\nu_1}\lambda_k^{\nu_2}c_k&\lambda_{k'}^{\nu_1+\nu_2}c_k\\
			\end{array}
		\right]
		\left[
			\begin{array}{c}
				w_k\\
				w_{k'}\\
			\end{array}
		\right]\\
		&=
		\left[
			\begin{array}{c c}
				c_1\left( k \right) &c_2\left( k \right) \\
				c_3\left( k \right) &c_4\left( k \right) \\
			\end{array}
		\right]\\
		\Rightarrow&
		\left\{
			\begin{array}{l}
				c_1\left( k \right) =\left( 1-2\mathit{w}\sin^2\left( \frac{k\pi}{2n} \right)  \right)^{\nu_1+\nu_2}\sin^2\left( \frac{k\pi}{2n} \right)\\
				c_2\left( k \right) =
				\left( 1-2\mathit{w}\sin^2\left( \frac{k\pi}{2n} \right)  \right)^{\nu_1}
				\left( 1-2\mathit{w}\sin^2\left( \frac{\left( n-k \right) \pi}{2n} \right)  \right)^{\nu_2}
				\sin^2\left( \frac{k\pi}{2n} \right)\\
				c_3\left( k \right) =
				\left( 1-2\mathit{w}\sin^2\left( \frac{\left( n-k \right) \pi}{2n} \right)  \right)^{\nu_1}
				\left( 1-2\mathit{w}\sin^2\left( \frac{k\pi}{2n} \right)  \right)^{\nu_2}
				\cos^2\left( \frac{k\pi}{2n} \right)\\
				c_4\left( k \right) =
				\left( 1-2\mathit{w}\sin^2\left( \frac{\left( n-k \right) \pi}{2n} \right)  \right)^{\nu_1+\nu_2}
				\cos^2\left( \frac{k\pi}{2n} \right)\\ 
			\end{array}
		\right.
		,
	\end{split}
\end{equation*}
where $k\in\left[1,\frac{n}{2}  \right] $.

The plots of damping coefficients $c_i$'s of two-drid correction with weighted Jacobi for $n=64$ and $\mathit{w}=\frac{2}{3}$ are reproduced in the Figure \ref{fig6} to \ref{fig11}.

Let k vary from $1$ to $n$. By 
\[s_k=\sin^2\left( \frac{k\pi}{2n} \right)
	=\cos^2\left( \frac{\pi}{2}-\frac{k\pi}{2n} \right)
	=\cos^2\left( \frac{\left( n-k \right)\pi }{2n} \right) =c_{k'}\]
we can write $c_1$ and $c_4$ together in one smooth function.
Similarly, we can write $c_2$ and $c_3$ together.
We have 
\[
\left\{
	\begin{array}{l}
		c_{14}\left( k \right) = \left( 1-2\mathit{w}\sin^2\left( \frac{k\pi}{2n} \right)  \right)^{\nu_1+\nu_2}
		\sin^2\left( \frac{k\pi}{2n} \right), \\
		c_{23}\left( k \right)=\left( 1-2\mathit{w}\cos^2\left( \frac{k\pi}{2n} \right)  \right)^{\nu_1}
		\left( 1-2\mathit{w}\sin^2\left( \frac{k\pi }{2n} \right)  \right)^{\nu_2}
		\cos^2\left( \frac{k\pi}{2n} \right),\\ 
	\end{array}
	\right.
\] 
where 
\[\left\{
	\begin{array}{l}
		c_1\left( 1:\frac{n}{2} \right)=c_{14}\left( 1:\frac{n}{2} \right),\\
		c_2\left( \frac{n}{2}:n \right)=c_{23}\left( \frac{n}{2}:n \right),\\ 
		c_3\left( 1:\frac{n}{2} \right)=c_{23}\left( 1:\frac{n}{2} \right),\\
		c_4\left( \frac{n}{2}:n \right)=c_{14}\left( \frac{n}{2}:n\right).\\
	\end{array}
	\right.
\]
Firstly, by the range of sine and cosine functions and $\mathit{w} = \frac{2}{3}$, we can easily observe that 
\begin{equation*}
	\begin{split}
		\left\{
			\begin{array}{l}
				  \mid 1-2\mathit{w}\sin^2\left( \frac{k\pi}{2n} \right) \mid \le 1\\
				  \mid 1-2\mathit{w}\cos^2\left( \frac{k\pi}{2n} \right)  \mid \le 1\\
				   \mid \sin^2\left( \frac{k\pi}{2n} \right)  \mid \le 1\\
				   \mid \cos^2\left( \frac{k\pi}{2n} \right) \mid \le 1\\ 
			\end{array}
			\right.
			.
	\end{split}
\end{equation*}
Thus we have 
\[
\left\{
	\begin{array}{l}
		\mid c_{14}\left( k \right)  \mid \le 1\\
		\mid c_{23}\left( k \right)  \mid \le 1\\
	\end{array}
	\right.\Rightarrow
		\mid c_i\left( k \right)  \mid \le 1
.\] 

Then, we step further. We can denote $\sin^2\left( \frac{k\pi}{2n} \right)$ as $y$ and $\cos^2\left( \frac{k\pi}{2n} \right) $ as $z$.
Because sine function increases monotonically in $\left[0, \frac{\pi}{2}  \right] $ 
and cosine function decreases monotonically in $\left[0, \frac{\pi}{2}\right]$, we have y increases monotonically from 0 to 1 and z decreases from 1 to 0 when k becomes larger.
For the sake of convenience we expand the range of $y,z$ to $\left[0, 1\right]$.
The expansion will not influence the maxima of the $c_i$'s much, due to continuity.
Then we can transform $c_{14}\left( k \right) $ and $c_{23}\left( k \right) $ by
\[
\left\{
	\begin{array}{l}
		c_{14}\left( k \right)=f\left( y \right)=\left( 1-\frac{4}{3}y \right)^{\nu_1+\nu_2}y,\\
		c_{23}\left( k \right)=g\left( z \right)=\left( 1-\frac{4}{3}z \right)^{\nu_1}\left(\frac{4}{3}z-\frac{1}{3}  \right)^{\nu_2}z.\\    
	\end{array}
	\right.
\]
Firstly, we observe $f\left( y \right) $.
If $\nu_1+\nu_2=0$, $f\left( y \right)=y $. The range of $c_{14}$ is the same as that of $y$, which is close to $\left(0,1\right) $.
While when $\nu_1+\nu_2=a>0$,
the maximum may be reached at $y=1$ or $y=m$ where
\begin{equation*}
	\begin{split}
		f'\left( m \right)
		&=\left( 1-\frac{4}{3}y \right)^{a}-a\frac{4}{3}\left( 1- \frac{4}{3}y\right)^{a-1}y\\
		&=\left( 1-\frac{4}{3}y \right)^{a-1}\left[ 1-\left(\frac{4}{3}+\frac{4a}{3}\right)y \right]  =0\\
		\Rightarrow&y=\frac{3}{4}\quad \mathrm{or}\quad \frac{3}{4+4a} \\
	\end{split}
\end{equation*}
Since $f$ vanishes at $y=\frac{3}{4}$, we have 
\begin{equation*}
	\begin{split}
		\max_{k\in\left[1,n-1\right]}  \mid c_{14}\left( k \right) \mid 
		&=\max_{k\in\left[1, n-1\right]}\{ \mid f\left(  \frac{3}{4+4a} \right) \mid ,  \mid f\left( 1 \right) \mid   \}\\ 
		&=\max\{\frac{3\left( 4a \right)^{a} }{\left( 4+4a \right)^{a+1} } ,\frac{1}{3^{a}} \}\\
		&\le\max\{\frac{3}{4+4a}, \frac{1}{3^{a}}\}. \\
	\end{split}
\end{equation*}
From $a\ge 1$ we can conclude that $\max_{k\in\left[1,n-1\right]}c_{14}\left( k \right) $ is small.

Then we observe $g\left( z \right) $.
If $\nu_1+\nu_2=0$, the range of $c_{23}$ is the same as that of $z$, which is close to $\left(0, 1  \right) $.
When $\nu_1 = 0$ and $\nu_2 \neq  0$, we have 
\[
	\lim_{z\to 1}g\left(z  \right)=\left( \frac{4}{3}-\frac{1}{3} \right)^{\nu_2}\times1=1  
,\] 
which means 
\[
	\lim_{n\to \infty}c_{3}\left( 1 \right) = 1 
.\] 
While for $c_2$, we have  $ \mid  \frac{4}{3}z-\frac{1}{3} \mid\le \frac{1}{3}  $. 
It is obvious that $c_2\left( k \right)\le \frac{1}{2\times3^{\nu_2}} $.
When $\nu_1\neq 0$, the situation reduces to the case $a\neq 0$ of $f\left( y \right) $, which has been discussed,
because $ \mid \frac{4}{3}z - \frac{1}{3} \mid \le 1$.

Thus we can conclude that $c_i$'s are small.

\begin{figure}[ht]
        \centering
	\begin{minipage}[t]{0.45\linewidth}
                \centering
		\caption{$\nu_1=0$, $\nu_2=0$.}
		\includegraphics[width=8cm, height = 6cm]{Exercise9_47_1.eps}
		\label{fig6}
        \end{minipage}
        \begin{minipage}[t]{0.45\linewidth}
                \centering
		\caption{$\nu_1=0$, $\nu_2=2$.}
                \includegraphics[width=8cm, height=6cm]{Exercise9_47_2.eps}
		\label{fig7}
        \end{minipage}
	
	\begin{minipage}[t]{0.45\linewidth}
                \centering
                \caption{$\nu_1=1$, $\nu_2=1$.}
                \includegraphics[width=8cm, height = 6cm]{Exercise9_47_3.eps}
                \label{fig8}
        \end{minipage}
        \begin{minipage}[t]{0.45\linewidth}
                \centering
                \caption{$\nu_1=2$, $\nu_2=0$.}
                \includegraphics[width=8cm, height=6cm]{Exercise9_47_4.eps}
                \label{fig9}
        \end{minipage}

	\begin{minipage}[t]{0.45\linewidth}
                \centering
                \caption{$\nu_1=2$, $\nu_2=2$.}
                \includegraphics[width=8cm, height = 6cm]{Exercise9_47_5.eps}
                \label{fig10}
        \end{minipage}
        \begin{minipage}[t]{0.45\linewidth}
                \centering
                \caption{$\nu_1=4$, $\nu_2=0$.}
                \includegraphics[width=8cm, height=6cm]{Exercise9_47_6.eps}
                \label{fig11}
        \end{minipage}
	\caption*{Figures of damping coefficients of two-grid correction with weighted Jacobi for $n=64$ and $\mathit{w}=\frac{2}{3}$.} 
\end{figure}

\section*{XI. Exercise 9.51 Prove Lemma 9.50}

We assume the standard bases of $\mathbb{R}^{n - 1}$ and $\mathbb{R}^{\frac{n}{2} - 1}$ are $\{e_1, e_2, \ldots, e_{n - 1}\}$ and $\{e'_1, e'_2,\ldots,e'_{n /2-1}\}$ respectively.
Firstly we write the matrix of the operator $I_{h}^{2h}$, which is
\[
	I_{h}^{2h}=\frac{1}{4}\left[
	\begin{array}{c c c c c c c}
		1& 2& 1&0&0&\ldots&0\\
		0&0&1&2&1&\ldots&0\\
		\vdots&\vdots&\vdots&\vdots&\vdots&\ddots&\vdots\\
		0&0&0&0&0&\cdots&1\\
	\end{array}
\right]
.\] 
For an arbitrary $i\in\{1, 2, \ldots, \frac{n}{2} - 1\}$, we have
\begin{equation*}
	\begin{split}
		&I_{h}^{2h}e_{2i} = \frac{1}{2}e'_{i}\\
	\end{split}
\end{equation*}
Thus we have
\[
	\mathcal{R}\left( I_{h}^{2h} \right)
	=\{I_{h}^{2h}v:v\in\mathrm{R^{n-1}}\}
	=\mathrm{span}\{e'_1, e'_2, \ldots, e'_{\frac{n}{2}-1}\}
	=\mathbb{R}^{\frac{n}{2}-1} 
.\] 
So we have
 \[
	 \dim\mathcal{R}\left( I_{h}^{2h} \right)=\dim\mathbb{R}^{\frac{n}{2}-1}=\frac{n}{2}-1 
.\] 
Then by Theorem B.49, we have
\[
	n-1=\dim \Omega^{h}=\dim \mathcal{N}\left( I_h^{2h} \right)+\dim\mathcal{R}\left( I_h^{2h} \right)
	\Rightarrow \dim \mathcal{N}\left( I_h^{2h} \right)=n-1-\frac{n}{2}+1=\frac{n}{2}  
.\] 

From the structure of $I_{2h}^{h}$, we can easily see $\mathcal{N}\left( I_{2h}^{h} \right)=\{0\}$. Thus we have 
$\dim \mathcal{R}\left( I_{2h}^{h} \right)=\dim \Omega^{2h}=\frac{n}{2}-1 $.
Now we prove that $\mathcal{N}\left( I_{h}^{2h} \right)\cap\mathcal{R}\left( I_{2h}^{h} \right)=\{0\} $.
If $\exists v\in\mathcal{N}\left( I_{h}^{2h} \right)\cap\mathcal{R}\left( I_{2h}^{h} \right)  $ and $v\neq 0$.
Then we have $\exists a\in\Omega^{2h}$ satisfying $I_{2h}^{h}a=v$. Obviously, $a\neq 0$.
So by the equation (9.39a), we have 
\[
	0=a^{T}I_{h}^{2h}v=a^{T}\left(I_{h}^{2h}I_{2h}^{h}a\right)=\frac{1}{c}\left( I_{2h}^{h}a \right)^{T}\left( I_{2h}^{h}a \right)=\frac{1}{c}v^{T}v\neq 0  
.\] 
There is a contradiction. So by 
\[\mathcal{N}\left( I_{h}^{2h} \right)\cap\mathcal{R}\left( I_{2h}^{h} \right)=\{0\}  \]
and 
\[\dim\mathcal{R}\left( I_{2h}^{h} \right)+\dim\mathcal{N}\left( I_{h}^{2h} \right)=\frac{n}{2}-1+\frac{n}{2}=n-1=\dim\Omega^{h} , \]
we have $\mathcal{R}\left( I_{2h}^{h} \right)\oplus\mathcal{N}\left( I_{h}^{2h} \right)=\Omega^{h}  $. 

Since $\dim\mathcal{R}\left(I_h^{2h}  \right)=\dim\Omega^{2h}=\frac{n}{2}-1 $ and 
$I_h^{2h}v=0$ $\forall v\in\mathcal{N}\left( I_h^{2h} \right) $, we can easily get the arrow about $I_h^{2h}$ in the figure.
The arrow about $I_{2h}^{h}$ is obvious.
\end{document}

%%% Local Variables: 
%%% mode: latex
%%% TeX-master: t
%%% End: 
