\documentclass{article}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsfonts}
\usepackage{graphicx} 
\usepackage[left=1cm,right=1cm,top=1cm,bottom=2cm]{geometry}
\title{NumPDE Homework 3}
\author{Jiang Zhou 3220101339 }
\date{2025/4/2}

\begin{document}
\maketitle
\section*{Exercise 10.14}
The solutions $v(t)$ and $w(t)$ can be written in integral forms:
\begin{align*}
    v(t) = v_0 + \int_a^t v'(s)ds \\
    w(t) = w_0 + \int_a^t w'(s)ds 
\end{align*}
Subtract the two equations:
\begin{align*}
    v(t) - w(t) = v_0 - w_0 +\int_a^t [v'(s)-w'(s)]ds = v_0 - w_0 +\int_a^t [f(v(s), s)-f(w(s), s)]ds
\end{align*}
Take the norm of the both sides:
\begin{align*}
    \|v(t) - w(t)\| =  \|v_0 - w_0 +\int_a^t [f(v(s), s)-f(w(s), s)]ds\| \leq \|v_0-w_0\| + \int_a^t \|f(v(s), s)-f(w(s), s)\|ds
\end{align*}
Because \(f\) satisfies the Lipschitz condition (with constant \(L\)) in its first argument, we can get:
\begin{align*}
    \|v(t) - w(t)\| \leq \|v_0-w_0\| + \int_a^t L\|v(s) - w(s)\|ds
\end{align*}
By Gronwall's inequality (integral form), we can get:
\begin{align*}
    \|v(t) - w(t)\| \leq \|v_0-w_0\|e^{L(t-a)}
\end{align*}

\section*{Exercise 10.64}
\subsection*{Example 10.50} 
For \(s=1, \alpha_1=1, \alpha_0 = -1, \beta_1 = \beta_0 = \frac{1}{2}\), the coefficients $C_i$ are:
\begin{align*}
    C_0 &= \sum_{j=0}^s \alpha_j = 1-1 = 0\\
    C_1 &= \sum_{j=0}^s (j\alpha_j -\beta_j) = 1 - \frac{1}{2} - \frac{1}{2} = 0\\
    C_2 &= \sum_{j=0}^s (\frac{1}{2}j^2\alpha_j - j\beta_j) = \frac{1}{2} - \frac{1}{2} = 0\\
    C_3 &= \sum_{j=0}^s (\frac{1}{6}j^3\alpha_j - \frac{1}{2}j^2\beta_j) = \frac{1}{6} - \frac{1}{4} = -\frac{1}{12}\\
    C_4 &= \sum_{j=0}^s (\frac{1}{24}j^4\alpha_j - \frac{1}{6}j^3\beta_j) = \frac{1}{24} - \frac{1}{12} = -\frac{1}{24}
\end{align*}
\subsection*{Example 10.52}
For \(s=2, \alpha_2=1, \alpha_1 = 0, \alpha_0 = -1, \beta_1 = 2, \beta_0 = \beta_2=0\), the coefficients $C_i$ are:
\begin{align*}
    C_0 &= \sum_{j=0}^s \alpha_j = 1-1 = 0\\
    C_1 &= \sum_{j=0}^s (j\alpha_j -\beta_j) = 2 - 2 = 0\\
    C_2 &= \sum_{j=0}^s (\frac{1}{2}j^2\alpha_j - j\beta_j) = 2 - 2 = 0\\
    C_3 &= \sum_{j=0}^s (\frac{1}{6}j^3\alpha_j - \frac{1}{2}j^2\beta_j) = \frac{4}{3} - 1 = \frac{1}{3}\\
    C_4 &= \sum_{j=0}^s (\frac{1}{24}j^4\alpha_j - \frac{1}{6}j^3\beta_j) = \frac{2}{3} - \frac{1}{3} = \frac{1}{3}
\end{align*}

\section*{Exercise 10.66}
For \(\|\mathcal L\mathbf{u}(t_n)\| = O(k^3)\), the first three terms in (10.44) should be zero, i.e.,
\begin{align*}
    C_0 = \sum_{j=0}^s \alpha_j = 0
    ,\quad C_1 = \sum_{j=0}^s (j\alpha_j -\beta_j) =  0
    ,\quad C_2 = \sum_{j=0}^s (\frac{1}{2}j^2\alpha_j - j\beta_j) = 0
\end{align*}
which is equivalent to the following system of equations:
\begin{align*}
    \rho(1) = 0, \quad \rho'(1) = \sigma(1), \quad \frac{1}{2}\rho''(1) + \frac{1}{2}\rho'(1) = \sigma'(1)
\end{align*}

\section*{Exercise 10.67}
By (10.45), we can derive the coefficients of the LMMs.
By running the code in the MATLAB file, we obtain the results shown in Figures~\ref{fig:Adams_Bashforth_coefficients}, \ref{fig:Adams_Moulton_coefficients} and~\ref{fig:Backward_Differentiation_coefficients}.
\begin{figure}[h!]
    \centering
    \begin{minipage}{0.3\textwidth}
        \centering
        \includegraphics[width=\textwidth]{figure/Adams_Bashforth_coefficients.png}
        \caption{Adams Bashforth coefficients}
        \label{fig:Adams_Bashforth_coefficients}
    \end{minipage}
    \hfill
    \begin{minipage}{0.22\textwidth}
        \centering
        \includegraphics[width=\textwidth]{figure/Adams_Moulton_coefficients.png}
        \caption{Adams Moulton coefficients}
        \label{fig:Adams_Moulton_coefficients}
    \end{minipage}
    \hfill
    \begin{minipage}{0.3\textwidth}
        \centering
        \includegraphics[width=\textwidth]{figure/Backward_Differentiation_Coefficients.png}
        \caption{Backward Differentiation coefficients}
        \label{fig:Backward_Differentiation_coefficients}
    \end{minipage}
\end{figure}

\section*{Exercise 10.72}
By Exercise 10.67, we can get the coefficients of the third-order BDF:
\[\alpha_0 = -\frac{2}{11}, \alpha_1= \frac{9}{11}, \alpha_2 = -\frac{18}{11}, \alpha_3 = 1,\beta_3=\frac{6}{11}\]
Then we can apply the coefficients to Definition 10.55:
\begin{align*}
    \rho(z) &= \sum_{j=0}^3 \alpha_j z^j = -\frac{2}{11} + \frac{9}{11}z - \frac{18}{11}z^2 + z^3\\
    \sigma(z) &= \sum_{j=0}^3 \beta_j z^j = \frac{6}{11}z^3
\end{align*}
Then we apply Theorem 10.70 to verify the order of the accuracy of the BDF method:
\begin{align*}
    \frac{\rho(z)}{\sigma(z)} &= \frac{-\frac{2}{11} + \frac{9}{11}z - \frac{18}{11}z^2 + z^3}{\frac{6}{11}z^3} = -\frac{1}{3z^3} + \frac{3}{2z^2} - \frac{3}{z} + \frac{11}{6} \\
    &= -\frac{1}{3[(z-1)+1]^3} + \frac{3}{2[(z-1)+1]^2} - \frac{3}{[(z-1)+1]} + \frac{11}{6} 
\end{align*}
As \(z \to 1\), we can apply Taylor expansion to get:
\begin{align*}
    \frac{\rho(z)}{\sigma(z)} &= -\frac{1}{3} + \frac{3}{2} - 3 + \frac{11}{6} + (1-3+3)\cdot(z-1) + \frac{-4+9-6}{2}(z-1)^2+\frac{20 - 36 + 18}{3!}(z-1)^3+O((z-1)^4) \\
    &= (z-1) -\frac{1}{2}(z-1)^2 +\frac{1}{3}(z-1)^3 + O((z-1)^4) = \log z + \frac{C_{p+1}}{\sigma(1)}(z-1)^{p+1} + O((z-1)^{p+2})
\end{align*}
As a result, the order of the accuracy is indeed 3.

\section*{Exercise 10.73}
Let's apply the condition \(u_t = q(t)\) to an s-step LMM:
\[
  \sum_{j=0}^s \alpha_j u(t_n + jh) = h \sum_{j=0}^s \beta_j q(t_n + jh).
\]

\subsection*{Necessity ($\Rightarrow$)}
From Definition 10.60, we can get by LMM has order of accuracy $p$: 
\begin{align}
    \label{eq:1}
    C_0 = C_1 = C_2 = \cdots = C_p = 0, \quad C_{p+1} \neq 0
\end{align}
\subsubsection*{If \(q(t)\) is a polynomial of degree \(< p\):}
Lemma 10.59 yields:
\begin{align*}
    \mathcal{L} u(t_n) = C_0u(t_n) + C_1hu_t(t_n) + C_2h^2u_{tt}(t_n) + \cdots + C_p h^p u^{(p)}(t_n) \cdots
\end{align*}
Due to the condition (\(q(t)\) is a polynomial of degree \(< p\)), we can get \(u^{(i)} = 0\) for \(i \geq p + 1\). Thus, we can get:
\begin{align*}
    \mathcal{L} u(t_n) = C_0u(t_n) + C_1hu_t(t_n) + C_2h^2u_{tt}(t_n) + \cdots + C_p h^p u^{(p)}(t_n)
\end{align*}
Then we apply \eqref{eq:1} to get \(\mathcal{L} u(t_n) = 0\), which shows that the LMM gives exact results.

\subsubsection*{If \(q(t)\) is a polynomial of degree \(p\):}
Due to the condition (\(q(t)\) is a polynomial of degree \(p\)), we can get \(u^{(i)} = 0\) for \(i \geq p + 2\). Thus, we can get:
\begin{align*}
    \mathcal{L} u(t_n) &= C_0u(t_n) + C_1hu_t(t_n) + C_2h^2u_{tt}(t_n) + \cdots + C_{p+1} h^{p+1} u^{(p+1)}(t_n)\\
    &= C_{p+1} h^{p+1} u^{(p+1)}(t_n) \neq 0.
\end{align*}
As a result, an LMM does not give exact results whenever $q$ is a polynomial of degree $p$.

\subsection*{Sufficiency ($\Leftarrow$)}
\subsubsection*{If \(q(t)\) is a polynomial of degree \(< p\):}
\begin{align*}
    \mathcal{L} u(t_n) = C_0u(t_n) + C_1hu_t(t_n) + C_2h^2u_{tt}(t_n) + \cdots + C_{p} h^{p} u^{(p)}(t_n).
\end{align*}
Because the result is exact for every such polynomial, we have \(\mathcal{L} u(t) = 0\) for all \(t\).\\
So we can get \(C_0 = C_1 = C_2 = \cdots = C_p = 0\), which implies that the LMM has order of accuracy at least \(p\).\\

\subsubsection*{If \(q(t)\) is a polynomial of degree \(p\):}
\begin{align*}
    \mathcal{L} u(t_n) = C_0u(t_n) + C_1hu_t(t_n) + C_2h^2u_{tt}(t_n) + \cdots + C_{p+1} h^{p+1} u^{(p+1)}(t_n)
\end{align*}
Since the result is not exact, we have \(\mathcal{L} u(t) \neq 0\) for some \(t\).\\
Combined with \(C_0 = C_1 = \cdots = C_p = 0\) from the previous case, this forces \(C_{p+1} \neq 0\), which implies that the LMM has order of accuracy exactly \(p\).\\


\section*{Exercise 10.88}
As shown in Figure~\ref{fig:Exercise_10_88}:
\begin{figure}[!h]
    \centering
    \includegraphics[width=0.8\textwidth]{figure/Exercise10_88.png}
    \caption{Exercise 10.88}
    \label{fig:Exercise_10_88}
\end{figure}

\section*{Exercise 10.93}
To prove Theorem 10.92, we proceed by induction on \( n \).
\subsection*{Base Case (\( n = s \)):}
For \( n = s \), the solution (10.73) becomes:
\[
y_s = \sum_{i=0}^{s-1} \theta_{s-i} \tilde{y}_i + \sum_{i=s}^{s} \theta_{s-i} \psi_i = \sum_{i=0}^{s-1} \theta_{s-i} \tilde{y}_i + \theta_0 \psi_s.
\]
From the initial conditions (10.71), we have:
\[
\theta_0 = 1, \quad \theta_{-1} = \theta_{-2} = \cdots = \theta_{-s+1} = 0.
\]
Thus:
\[
y_s = \sum_{i=0}^{s-1} \theta_{s-i} \tilde{y}_i + \psi_s.
\]
This matches the given initial values \( y_0, y_1, \ldots, y_{s-1} \) when \( \tilde{y}_i \) are determined by (10.74). Therefore, the base case holds.

\subsection*{Inductive Step:}
Assume the solution (10.73) holds for all \( k \) such that \( s \leq k \leq n \). We need to show it holds for \( n+1 \).

From the recurrence relation (10.70):
\[
y_{n+s+1} = -\sum_{i=0}^{s-1} \alpha_i y_{n+i+1} + \psi_{n+s+1}.
\]
Substitute the inductive hypothesis for \( y_{n+i+1} \):
\[
y_{n+s+1} = -\sum_{i=0}^{s-1} \alpha_i \left( \sum_{j=0}^{s-1} \theta_{n+i+1-j} \tilde{y}_j + \sum_{j=s}^{n+i+1} \theta_{n+i+1-j} \psi_j \right) + \psi_{n+s+1}.
\]
Simplify the double sums:
\[
y_{n+s+1} = -\sum_{j=0}^{s-1} \tilde{y}_j \left( \sum_{i=0}^{s-1} \alpha_i \theta_{n+i+1-j} \right) - \sum_{j=s}^{n+1} \psi_j \left( \sum_{i=0}^{s-1} \alpha_i \theta_{n+i+1-j} \right) + \psi_{n+s+1}.
\]
From the definition of \( \theta_k \), we know:
\[
\theta_{n+s+1} = -\sum_{i=0}^{s-1} \alpha_i \theta_{n+i+1}.
\]
Thus, the expression simplifies to:
\[
y_{n+s+1} = \sum_{j=0}^{s-1} \theta_{n+s+1-j} \tilde{y}_j + \sum_{j=s}^{n+1} \theta_{n+s+1-j} \psi_j + \psi_{n+s+1}.
\]
This matches the form of (10.73) for \( n+1 \), completing the inductive step.


\section*{Exercise 10.98}
Suppose the LMM is not consistent. Then by Definition 10.62 the LMM's order of accuracy is 0, which yields:
\begin{align*}
    C_0 = \sum_{j=0}^s \alpha_j = 0 \text{ (preconsistent)}, \quad C_1 = \sum_{j=0}^s (j\alpha_j -\beta_j) \neq 0
\end{align*}
For (10.78a), \(\sum_{j=0}^s \alpha_ju(t_n+jk) = k\sum_{j=0}^{s}\beta_jq(t_n+jk) = 0\).\\
From the initial condition \(u(0) = 1\), we can get \(\lim_{k \to 0}U^N = \lim_{k \to 0}U^{N-1} = u(t_N)\), which shows that \(C_0 = 0\). As a result, the LMM is preconsistent.\\
For (10.78b), \(\sum_{j=0}^s \alpha_jU_{n+j} = k\sum_{j=0}^{s}\beta_jq(t_n+jk) = k\sum_{j=0}^{s}\beta_j\).\\
Consider the IVP \(u'(t) = 1\) with \(u(0) = 0\), whose exact solution is \(u(t) = t\).
By the convergence assumption, we can get \(\lim_{k \to 0}U_{n+j} = u(t_{n+j}) = t_n + jk\).\\
\begin{align*}
    \sum_{j=0}^s \alpha_jU_{n+j} &= k\sum_{j=0}^{s}\beta_j \\
    \rightarrow \sum_{j=0}^s \alpha_j(t_n + jk) &= k\sum_{j=0}^{s}\beta_j\\
    \rightarrow \sum_{j=0}^s \alpha_jt_n  - k\sum_{j=0}^{s}(j\alpha_j- \beta_j) &= 0
\end{align*}
Since \(C_0 = \sum_{j=0}^s \alpha_j = 0\), the first term vanishes, and we obtain \(C_1 = \sum_{j=0}^{s}(j\alpha_j- \beta_j) = 0\), contradicting the assumption \(C_1 \neq 0\).\\
So the LMM is consistent.
\end{document}
