\documentclass[a4paper]{article}
\usepackage{xeCJK}
\setCJKmainfont{WenQuanYi Micro Hei}
\usepackage[affil-it]{authblk}
\usepackage[backend=bibtex,style=numeric]{biblatex}
\usepackage{float}
\usepackage{subcaption}
\usepackage{graphicx}
\usepackage{geometry}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{hyperref}   
\usepackage{fancyhdr}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

\addbibresource{citation.bib}

\begin{document}
% =================================================
\title{NumPDE homework \# 2}

\author{wangjie 3220100105
  \thanks{Electronic address: \texttt{3220100105@zju.edu.cn}}}
\affil{(math), Zhejiang University }


\date{Due time: \today}

\maketitle

\begin{abstract}
    theoretical homework     
\end{abstract}





% ============================================
\section*{theoretical homework}

All exercises in Chapter 9.
\cite{zqh}

\subsection*{Exercise 9.5}  

We aim to prove:
\[
\frac{1}{\text{cond}(A)} \frac{\|r\|_2}{\|b\|_2} \leq \frac{\|e\|_2}{\|x\|_2} \leq \text{cond}(A) \frac{\|r\|_2}{\|b\|_2},
\]
where \( r = b - A\hat{x} \), \( e = x - \hat{x} \), and \( \text{cond}(A) = \|A\|_2 \|A^{-1}\|_2 \).

\subsection*{Step 1: Express \( e \) in terms of \( r \)}
From \( r = b - A\hat{x} \) and \( b = Ax \), we have:
\[
r = A(x - \hat{x}) = Ae.
\]
Thus:
\[
e = A^{-1}r.
\]

\subsection*{Step 2: Upper bound on \( \|e\|_2 \)}
Taking norms:
\[
\|e\|_2 = \|A^{-1}r\|_2 \leq \|A^{-1}\|_2 \|r\|_2.
\]
From \( b = Ax \), we have:
\[
\|x\|_2 \geq \frac{\|b\|_2}{\|A\|_2}.
\]
Combining:
\[
\frac{\|e\|_2}{\|x\|_2} \leq \frac{\|A^{-1}\|_2 \|r\|_2}{\frac{\|b\|_2}{\|A\|_2}} = \text{cond}(A) \frac{\|r\|_2}{\|b\|_2}.
\]

\subsection*{Step 3: Lower bound on \( \|e\|_2 \)}
From \( r = Ae \), we have:
\[
\|r\|_2 \leq \|A\|_2 \|e\|_2 \implies \|e\|_2 \geq \frac{\|r\|_2}{\|A\|_2}.
\]
From \( \|x\|_2 \leq \|A^{-1}\|_2 \|b\|_2 \), we get:
\[
\frac{\|e\|_2}{\|x\|_2} \geq \frac{\frac{\|r\|_2}{\|A\|_2}}{\|A^{-1}\|_2 \|b\|_2} = \frac{1}{\text{cond}(A)} \frac{\|r\|_2}{\|b\|_2}.
\]

\subsection*{Conclusion}
Combining the bounds:
\[
\frac{1}{\text{cond}(A)} \frac{\|r\|_2}{\|b\|_2} \leq \frac{\|e\|_2}{\|x\|_2} \leq \text{cond}(A) \frac{\|r\|_2}{\|b\|_2}.
\]
This completes the proof.
  
\subsection*{Exercise 9.8}  

Given the matrix \( A \) of the form:

\[
A = \frac{1}{h^2} \begin{bmatrix}
2 & -1 & & & \\
-1 & 2 & -1 & & \\
& -1 & 2 & -1 & \\
& & \ddots & \ddots & \ddots \\
& & & -1 & 2
\end{bmatrix},
\]

we need to compute the condition number \( \text{cond}(A) = \|A\|_2 \|A^{-1}\|_2 \) for \( n = 8 \) and \( n = 1024 \).

\subsection*{Step 1: Compute \( \|A\|_2 \)}
The eigenvalues of \( A \) are:

\[
\lambda_k = \frac{4}{h^2} \sin^2\left(\frac{k\pi}{2n}\right), \quad k = 1, 2, \dots, n-1.
\]

The largest eigenvalue (\( \|A\|_2 \)) is:

\[
\lambda_{\text{max}} = \frac{4}{h^2} \sin^2\left(\frac{(n-1)\pi}{2n}\right).
\]

\subsection*{Step 2: Compute \( \|A^{-1}\|_2 \)}
The smallest eigenvalue is:

\[
\lambda_{\text{min}} = \frac{4}{h^2} \sin^2\left(\frac{\pi}{2n}\right).
\]

\subsection*{Step 3: Compute \( \text{cond}(A) \)}
The condition number is:

\[
\text{cond}(A) = \|A\|_2 \|A^{-1}\|_2  = \frac{\sin^2\left(\frac{(n-1)\pi}{2n}\right)}{\sin^2\left(\frac{\pi}{2n}\right)}.
\]

\subsection*{Step 4: Specific Calculations}
For \( n = 8 \):

\[
\text{cond}(A) \approx 25.2741.
\]

For \( n = 1024 \):

\[
\text{cond}(A) \approx 424971.1792.
\]

\subsection*{Conclusion}
For \( n = 8 \), \( \text{cond}(A) \approx 25.2741 \).
For \( n = 1024 \), \( \text{cond}(A) \approx 424971.1792 \).

\subsection*{Exercise 9.11}

For the domain \( \Omega = (0, 1) \), show that the maximum wavenumber representable on the grid \( \Omega_h \) is \( k_{\text{max}} = \frac{1}{h} \). Additionally, analyze the case where the Fourier mode is required to be zero at the boundary points.

\subsection*{1. Maximum Wavenumber \( k_{\text{max}} = \frac{1}{h} \)}
On the interval \( \Omega = (0, 1) \), the grid \( \Omega_h \) has a step size \( h \) and \( N = \frac{1}{h} \) grid points. According to the Nyquist sampling theorem, the maximum representable wavenumber \( k_{\text{max}} \) is determined by the grid resolution.

\begin{itemize}
    \item \textbf{Wavelength of Fourier mode}: The wavelength is given by \( L = \frac{2}{k} \).
    \item \textbf{Minimum wavelength}: On the grid, the smallest resolvable wavelength is \( L_{\text{min}} = 2h \) (i.e., a full cycle requires at least two grid points).
    \item \textbf{Maximum wavenumber}: From \( L_{\text{min}} = \frac{2}{k_{\text{max}}} \), we obtain:
    \[
    k_{\text{max}} = \frac{2}{L_{\text{min}}} = \frac{2}{2h} = \frac{1}{h}.
    \]
\end{itemize}

Thus, the maximum representable wavenumber is:
\[
k_{\text{max}} = \frac{1}{h}.
\]

\subsection*{2. Visualization}
To verify the above conclusions, we plot the Fourier modes \( w_k(x) = \sin(k\pi x) \) for different wavenumbers \( k \). The plot is shown in Figure~\ref{fig:fourier_modes}.

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.8\textwidth]{9.11.png}
    \caption{Fourier modes on \( \Omega_h \) for different wavenumbers \( k \).}
    \label{fig:fourier_modes}
\end{figure}

\subsection*{Conclusion}
\begin{itemize}
    \item The maximum representable wavenumber on the grid \( \Omega_h \) is \( k_{\text{max}} = \frac{1}{h} \).
\end{itemize}

\subsection*{Exercise 9.14}

For \( n = 6 \), plot the original mode \( k = \frac{3}{2}n \) and the aliased mode \( k = \frac{1}{2}n \) due to Lemma 9.12.

\subsection*{Parameters}
\begin{itemize}
    \item Grid points: \( n = 6 \).
    \item Step size: \( h = \frac{1}{6} \).
    \item Maximum wavenumber: \( k_{\text{max}} = 6 \).
\end{itemize}

\subsection*{Wavenumbers}
\begin{itemize}
    \item Original: \( k = \frac{3}{2}n = 9 \).
    \item Aliased: \( k = \frac{1}{2}n = 3 \).
\end{itemize}

\subsection*{Plot}
The original mode \( \sin(9\pi x) \) and aliased mode \( \sin(3\pi x) \) are shown in Figure~\ref{fig:aliasing}.

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.8\textwidth]{9.14.png}
    \caption{Original mode (\( k = 9 \)) and aliased mode (\( k = 3 \)) for \( n = 6 \).}
    \label{fig:aliasing}
\end{figure}

\subsection*{Conclusion}
\begin{itemize}
    \item Aliasing occurs when \( k > k_{\text{max}} \).
    \item For \( n = 6 \), \( k = 9 \) is aliased to \( k = 3 \).
    \item The plot confirms the aliasing effect.
\end{itemize}

\subsection*{Exercise 9.17}

We are tasked with proving Lemma 9.16, which provides the iteration matrix \( T_\omega \) for the weighted Jacobi method and the corresponding eigenvalues.

\subsection*{Lemma 9.16}
For the linear system (9.7), the weighted Jacobi method in Definition 8.9 has the iteration matrix:

\[
T_\omega = (1 - \omega)I + \omega D^{-1}(L + U) = I - \frac{\omega h^2}{2} A,
\]

whose eigenvectors are the same as those of \( A \), with the corresponding eigenvalues:

\[
\lambda_k(T_\omega) = 1 - 2\omega \sin^2 \left( \frac{k\pi}{2n} \right),
\]

where \( k = 1, 2, \dots, n-1 \).

\subsection*{Proof}
We begin by recalling that for a linear system \( A \mathbf{u} = \mathbf{b} \), where \( A \) is a symmetric matrix, we can decompose \( A \) as:

\[
A = D - L - U,
\]

where \( D \) is the diagonal matrix of \( A \), \( -L \) is the strictly lower triangular part, and \( -U \) is the strictly upper triangular part.

The iteration matrix for the weighted Jacobi method is given by:

\[
T_\omega = (1 - \omega)I + \omega D^{-1}(L + U) = (1 - \omega)I + \omega D^{-1}(D - A).
\]

Since the diagonal entries of \( A \) are \( \frac{2}{h^2} \), we have \( D^{-1} = \frac{h^2}{2}I \), and therefore:

\[
T_\omega = I - \frac{\omega h^2}{2} A.
\]

\subsection*{Eigenvalues of \( T_\omega \)}
We know that the eigenvalues of \( A \) are:

\[
\lambda_k(A) = \frac{4}{h^2} \sin^2\left(\frac{k\pi}{2n}\right), \quad k = 1, 2, \dots, n-1.
\]

Let \( \mathbf{v}_k \) be an eigenvector of \( A \) with eigenvalue \( \lambda_k(A) \), i.e.,

\[
A \mathbf{v}_k = \lambda_k(A) \mathbf{v}_k.
\]

Since \( T_\omega = I - \frac{\omega h^2}{2} A \) is a polynomial in \( A \), it shares the same eigenvectors as \( A \). For the eigenvector \( \mathbf{v}_k \), we have:

\[
T_\omega \mathbf{v}_k = \left( I - \omega h^2 \cdot \frac{1}{2} A \right) \mathbf{v}_k.
\]

Expanding this expression, we get:

\[
T_\omega \mathbf{v}_k = \mathbf{v}_k - \omega h^2 \cdot \frac{1}{2} \lambda_k(A) \mathbf{v}_k.
\]

Thus, the eigenvalue of \( T_\omega \) corresponding to the eigenvector \( \mathbf{v}_k \) is:

\[
\lambda_k(T_\omega) = 1 - \omega h^2 \cdot \frac{1}{2} \lambda_k(A).
\]

Substituting this into the formula for \( \lambda_k(T_\omega) \), we get:

\[
\lambda_k(T_\omega) = 1 - \frac{\omega h^2}{2} \frac{4}{h^2} \sin^2\left(\frac{k\pi}{2n}\right) = 1 - 2 \omega \sin^2 \left( \frac{k\pi}{2n} \right).
\]

This is the desired result, completing the proof.

\subsection*{Exercise 9.18}

\subsection*{1. \(\rho(T_{\omega})\)}
The eigenvalues of the matrix \(T_{\omega}\) are given by the formula:
\[
\lambda_k(T_{\omega}) = 1 - 2 \omega \sin^2 \left( \frac{k \pi}{2n} \right)
\]
where \(k = 1, 2, \dots, n-1\) and \(n = 64\) in our case. We calculate the eigenvalues for \(\omega\) values in the range \([0, 1]\) and plot them for selected values of \(\omega\).

\begin{figure}[H]
    \centering
    \includegraphics[width=0.8\textwidth]{9.18.png}
    \caption{Eigenvalues of the iteration matrix \(T_{\omega}\) for different values of \(\omega\). The values of \(\omega\) used are \(\omega = \frac{1}{3}, \frac{1}{2}, \frac{2}{3}, 1\).}
    \label{fig:eigenvalues}
\end{figure}

\subsection*{2. Spectral Radius Validation}
The spectral radius \(\rho(T_{\omega})\) is defined as the largest absolute value of the eigenvalues of \(T_{\omega}\). We compute the spectral radius for each \(\omega \in [0, 1]\) and check whether it meets the condition:
\[
\rho(T_{\omega}) \geq 0.9986
\]
The results of this validation are shown below for each value of \(\omega\):

\begin{table}[H]
    \centering
    \begin{tabular}{|c|c|}
    \hline
    \textbf{Value of \(\omega\)} & \textbf{Spectral Radius \(\rho(T_{\omega})\)} \\
    \hline
    0.00  & 1.0000 (pass) \\
    0.01  & 1.0000 (pass) \\
    0.02  & 1.0000 (pass) \\
    0.03  & 1.0000 (pass) \\
    0.04  & 1.0000 (pass) \\
    0.05  & 0.9999 (pass) \\
    0.06  & 0.9999 (pass) \\
    \vdots & \vdots \\
    0.95  & 0.9989 (pass) \\
    0.96  & 0.9988 (pass) \\
    0.97  & 0.9988 (pass) \\
    0.98  & 0.9988 (pass) \\
    0.99  & 0.9988 (pass) \\
    1.00  & 0.9988 (pass) \\
    \hline
    \end{tabular}
    \caption{Spectral radius \(\rho(T_{\omega})\) for different values of \(\omega\). Values that meet the condition \(\rho(T_{\omega}) \geq 0.9986\) are marked as "pass".}
    \label{tab:spectral_radius}
\end{table}

\subsection*{Conclusion}
From the results above, we conclude that for every \(\omega \in [0, 1]\) the spectral radius satisfies the desired condition \(\rho(T_{\omega}) \geq 0.9986\).

\subsection*{Exercise 9.21}

Reproduce Figure 2.8 from Briggs et al. [2000], verifying that:
\begin{itemize}
    \item Regular Jacobi is only effective for damping modes \( 16 \leq k \leq 48 \).
    \item For weighted Jacobi with \( \omega = \frac{2}{3} \), all modes \( 16 \leq k < 64 \) are damped quickly.
\end{itemize}

\subsection*{Implementation}
A Python program was written to simulate the Jacobi and weighted Jacobi methods for the one-dimensional model problem with \( n = 64 \) grid points. The initial guesses consist of the modes \( w_k \) for \( 1 \leq k \leq 63 \). The number of iterations required to reduce the initial error by a factor of 100 was recorded for each mode.

\subsection*{Results}
The results are shown in Figure~\ref{fig:jacobi}.

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.9\textwidth]{9.21.png}
    \caption{Iterations required to reduce the error by a factor of 100 for each mode \( w_k \). Left: Regular Jacobi. Right: Weighted Jacobi (\( \omega = \frac{2}{3} \)).}
    \label{fig:jacobi}
\end{figure}

\subsection*{Conclusion}
\begin{itemize}
    \item Regular Jacobi is only effective for damping modes \( 16 \leq k \leq 48 \).
    \item Weighted Jacobi with \( \omega = \frac{2}{3} \) damps all modes \( 16 \leq k < 64 \) quickly.
    \item The results confirm the observations in Briggs et al. [2000].
\end{itemize}

\subsection*{Exercise 9.35}

Show that for \( \nu_1 = \nu_2 = 1 \), the computational cost of the full multigrid (FMG) V-cycle is less than \( \frac{2}{(1 - 2^{-D})^2} WU \), and provide an upper bound for the computational cost for \( D = 1, 2, 3 \).

\subsection*{1. FMG Cycle Computational Cost}
From Figure 9.3 and Lemma 9.33 of the textbook, we know that the computational cost of the FMG cycle satisfies:

\begin{align*}
& 2 \left[ 2WU \left( 1 + 2^{-D} + 2^{-2D} + \cdots + 2^{-mD} \right) \right] \\
& + 2WU \left( 2^{-D} + 2^{-2D} + \cdots + 2^{-mD} \right) \\
& + \cdots \\
& + 2WU \left( 2^{-(m-1)D} + 2^{-mD} \right) \\
& + 2WU 2^{-mD} \\
& = 2WU\left[ 2 + 3\cdot 2^{-D} + \cdots + (m+2)2^{-mD}\right] \\
& = 2WU\,\frac{2-2^{-D}-(m+3)2^{-(m+1)D}+(m+2)2^{-(m+2)D}}{(1-2^{-D})^2} \\
& < \frac{2\left(2-2^{-D}\right)}{(1 - 2^{-D})^2} WU.
\end{align*}

\subsection*{2. Computational Cost Upper Bound for \( D = 1, 2, 3 \)}
Substituting \( D = 1, 2, 3 \) into the formula gives the following upper bounds:

\begin{itemize}
    \item \textbf{For \( D = 1 \):}
    \[
    \text{Computational Cost}_{\text{FMG}} < 2WU\frac{2-2^{-1}}{(1-2^{-1})^2} = 12WU.
    \]
    \item \textbf{For \( D = 2 \):}
    \[
    \text{Computational Cost}_{\text{FMG}} < 2WU\frac{2-2^{-2}}{(1-2^{-2})^2} = \frac{56WU}{9}.
    \]
    \item \textbf{For \( D = 3 \):}
    \[
    \text{Computational Cost}_{\text{FMG}} < 2WU\frac{2-2^{-3}}{(1-2^{-3})^2} = \frac{240WU}{49}.
    \]
\end{itemize}

\subsection*{Conclusion}
\begin{itemize}
    \item For \( \nu_1 = \nu_2 = 1 \), the computational cost of the FMG cycle is less than \( \frac{2\left(2-2^{-D}\right)}{(1 - 2^{-D})^2} WU \).
    \item For \( D = 1, 2, 3 \), the upper bounds on the computational cost are:
    \begin{itemize}
        \item \( D = 1 \): \( 12WU \).
        \item \( D = 2 \): \( \frac{56WU}{9} \).
        \item \( D = 3 \): \( \frac{240WU}{49} \).
    \end{itemize}
\end{itemize}

\subsection*{Exercise 9.41}

\subsection*{1. Rewriting (9.32) as (9.35)}
The two-grid correction operator \( TG \) can be expressed in matrix form as:
\[
TG \begin{bmatrix} w_k \\ w_{k'} \end{bmatrix} = 
\begin{bmatrix}
\lambda_k^{\nu_1 + \nu_2} & s_k \lambda_k^{\nu_1} \lambda_{k'}^{\nu_2} \\
s_k \lambda_{k'}^{\nu_1} \lambda_k^{\nu_2} & c_k \lambda_{k'}^{\nu_1 + \nu_2}
\end{bmatrix}
\begin{bmatrix} w_k \\ w_{k'} \end{bmatrix} =
\begin{bmatrix} c_1 & c_2 \\ c_3 & c_4 \end{bmatrix}
\begin{bmatrix} w_k \\ w_{k'} \end{bmatrix},
\]
where:
\begin{itemize}
    \item \( \lambda_k \) and \( \lambda_{k'} \) are the eigenvalues of the weighted Jacobi method for wavenumbers \( k \) and \( k' = n - k \), respectively.
    \item \( s_k \) and \( c_k \) are the smoothing and coarse-grid coefficients, respectively.
    \item \( c_1, c_2, c_3, c_4 \) are the damping coefficients.
\end{itemize}

\subsection*{2. Why the Magnitude of \( c_i \)'s is Small}
The magnitude of all four \( c_i \)'s is small because:
\begin{itemize}
    \item For high-frequency modes (\( k \) large),  \( \lambda_k \) and \( c_k \) are small.
    \item For low-frequency modes (\( k \) small), \( \lambda_k' \) and \( s_k \) are small.
    \item Hence all four \( c_i \)'s are small, so the eigenvalues of \( TG \) are small, leading to \( \rho(TG) \approx 0.1 \).
\end{itemize}

\subsection*{3. Reproducing the Plots}
The damping coefficients for \( n = 64 \) and \( n = 128 \) are shown in Figures~\ref{fig:n64} and~\ref{fig:n128}, respectively. The plots demonstrate that \( \rho(TG) \approx 0.1 \) is independent of the grid size.

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.9\textwidth]{9.41_64.png}
    \caption{Damping coefficients for \( n = 64 \).}
    \label{fig:n64}
\end{figure}

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.9\textwidth]{9.41_128.png}
    \caption{Damping coefficients for \( n = 128 \).}
    \label{fig:n128}
\end{figure}

\subsection*{Conclusion}
\begin{itemize}
    \item The magnitude of all \( c_i \)'s is small, leading to \( \rho(TG) \approx 0.1 \).
    \item The damping coefficients are independent of the grid size, as shown by the plots for \( n = 64 \) and \( n = 128 \).
\end{itemize}

\subsection*{Exercise 9.45}

\subsection*{1. Range of \( I_h^{2h} \):}

Let the matrix corresponding to \( I_h^{2h}\colon \mathbb{R}^{n-1} \rightarrow \mathbb{R}^{\frac{n}{2}-1}\) be denoted as \( A \). The matrix \( A \) is a rectangular banded matrix (the full-weighting restriction operator), given by:

\[
\[
A = \frac{1}{4}\begin{pmatrix}
1 & 2 & 1 &  &  &  & \\
 &  & 1 & 2 & 1 &  & \\
 &  &  &  & \ddots &  & \\
 &  &  &  & 1 & 2 & 1\\
\end{pmatrix}
\]

Each row of \( A \) contains the stencil \( (1, 2, 1) \), shifted by two columns from one row to the next, with zeros elsewhere. The factor \( \frac{1}{4} \) is a scaling factor applied to each entry.

\subsection*{Step 1: Investigating the structure of \( A \)}

The operator \( I_h^{2h} \) is the full-weighting restriction from the fine grid to the coarse grid, and such banded rectangular matrices arise naturally in multigrid methods. To analyze the range of \( A \), we need to determine its rank and null space.

Since \( A \) is a \( \left(\frac{n}{2}-1\right) \times (n-1) \) matrix with more columns than rows, it cannot be invertible; instead, we show that its rows are linearly independent, so that \( A \) is surjective and its range is the entire space \( \mathbb{R}^{\frac{n}{2}-1} \).

\subsection*{Step 2: Determining the rank of \( A \)}

Row \( k \) of \( A \) has its entry \( \frac{1}{2} \) in column \( 2k \), and no other row has a nonzero entry in that column; these columns therefore form a scaled identity submatrix, so the rows of \( A \) are linearly independent and \( A \) has full row rank. Since \( A \) is \( \left(\frac{n}{2}-1\right) \times (n-1) \), its rank is \( \frac{n}{2}-1 \). Therefore, the range of \( I_h^{2h} \), represented by \( A \), spans the entire \( \left(\frac{n}{2}-1\right) \)-dimensional space.

\subsection*{Step 3: Conclusion}

Thus, we have:

\[
\dim R(I_h^{2h}) = \frac{n}{2}-1, \quad \dim N(I_h^{2h}) = (n-1) - (\frac{n}{2}-1) = \frac{n}{2}
\]

This completes the proof.







% ===============================================
\section*{Acknowledgement}
None.


\printbibliography

\end{document}