\documentclass{article}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsfonts}
\usepackage{graphicx} 
\usepackage[left=1cm,right=1cm,top=1cm,bottom=2cm]{geometry}
\title{NumPDE Homework 5}
\author{Jiang Zhou 3220101339}
\date{2025/4/25}

\begin{document}
\maketitle
\section{Exercise 11.36}
For the three-stage $\mathbf{ERK}$ method:
\begin{equation*}
    \begin{cases}
        \mathbf{y_1} = \mathbf{f}(\mathbf{U^n}, t_n),\\
        \mathbf{y_2} = \mathbf{f}(\mathbf{U^n} + ka_{2,1}\mathbf{y_1}, t_n + c_2k),\\
        \mathbf{y_3} = \mathbf{f}(\mathbf{U^n} + ka_{3,1}\mathbf{y_1} + ka_{3,2}\mathbf{y_2}, t_n + c_3k),\\
        \mathbf{U^{n+1}} = \mathbf{U^n} + k\left(b_1\mathbf{y_1} + b_2\mathbf{y_2} + b_3\mathbf{y_3}\right).
    \end{cases}
\end{equation*}
the one-step error $\mathcal{L}\mathbf{u}(t_n)$ is:
\begin{align*}
    &\mathbf{u}(t_n+k) - \mathbf{u}(t_n) - k\left[b_1\mathbf{f}(\mathbf{u}(t_n), t_n) + b_2\mathbf{f}(\mathbf{u}(t_n)+ ka_{2,1}\mathbf{y_1}, t_n + c_2k) + b_3\mathbf{f}(\mathbf{u}(t_n)+ ka_{3,1}\mathbf{y_1} + ka_{3,2}\mathbf{y_2}, t_n + c_3k)\right]\\
    &= (\mathbf{u} + k\mathbf{u'} + \frac{k^2}{2}\mathbf{u''} + \frac{k^3}{6}\mathbf{u'''} + \mathcal{O}(k^4)) - \mathbf{u} - kb_1\mathbf{u'} - kb_2[\mathbf{f} + kc_2(\mathbf{f_u f}+\mathbf{f_t}) + \frac{k^2c_2^2}{2}(\mathbf{f}^2 \mathbf{f_{uu}} + 2\mathbf{f}\mathbf{f_{ut}} +\mathbf{f_{tt}})+\mathcal{O}(k^3)] \\
    &-kb_3[\mathbf{f} + (ka_{3,1}\mathbf{y_1}+ka_{3,2}\mathbf{y_2})\mathbf{f_u} + c_3k\mathbf{f_t} + \frac{(ka_{3,1}\mathbf{y_1}+ka_{3,2}\mathbf{y_2})^2}{2}\mathbf{f_{uu}} + \frac{c_3^2 k^2}{2}\mathbf{f_{tt}} + (ka_{3,1}\mathbf{y_1}+ka_{3,2}\mathbf{y_2})(c_3 k) \mathbf{f_{tu}} + \mathcal{O}(k^3)]\\
    &= k(1-b_1-b_2-b_3)\mathbf{u'} + k^2(\frac{1}{2} - b_2c_2 - b_3c_3)\mathbf{u''} + k^3[(\frac{1}{3} - b_2c_2^2 - b_3c_3^2)\mathbf{u'''} + (\frac{1}{6} - b_3a_{3,2}c_2)\mathbf{f_{u}}\mathbf{u''}] + \mathcal{O}(k^4).
\end{align*}
To maximize the order of accuracy, we choose:
\begin{align*}
    b_1 + b_2 + b_3 &= 1,\\
    b_2c_2 + b_3c_3 &= \frac{1}{2},\\
    b_2c_2^2 + b_3c_3^2 &= \frac{1}{3},\\
    b_3a_{3,2}c_2 &= \frac{1}{6}.
\end{align*}
These conditions lead to a family of three-stage, third-order $\mathbf{ERK}$ methods; one such one-parameter family (with $c_2 = c_3 = \frac{2}{3}$) is:
\[
\begin{array}{c|ccc}
0 & 0 & 0 & 0 \\
\frac{2}{3} & \frac{2}{3} & 0 & 0 \\
\frac{2}{3} & \frac{2}{3}-\frac{1}{4\alpha} & \frac{1}{4\alpha} & 0 \\
\hline
  & \frac{1}{4} & \frac{3}{4} - \alpha & \alpha
\end{array}
\]
For the Butcher tableau of Heun's third-order formula:
\[
\begin{array}{c|ccc}
0 & 0 & 0 & 0 \\
\frac{1}{3} & \frac{1}{3} & 0 & 0 \\
\frac{2}{3} & 0 & \frac{2}{3} & 0 \\
\hline
  & \frac{1}{4} & 0 & \frac{3}{4}
\end{array}
\]
As a result, Heun's third-order formula (for which $c_2 = \frac{1}{3} \neq \frac{2}{3}$) doesn't belong to this one-parameter family of three-stage, third-order $\mathbf{ERK}$ methods.

\section{Exercise 11.42}
\subsection{Prove $\Leftarrow$: \( B(r) \) Implies Exactness for \( \mathbb{P}_{r-1} \)}
Assume the RK method is \( B(r) \). We show that \( I_s(f) = I(f) \) for all \( f \in \mathbb{P}_{r-1} \).
Any polynomial \( f \) of degree \( < r \) can be written as:
\[
    f(t) = \sum_{m=0}^{r-1} a_m (t - t_n)^m.
\]
The integral \( I(f) \) is:
\[
    I(f) = \int_{t_n}^{t_n+k} f(t) dt = \sum_{m=0}^{r-1} a_m \int_{t_n}^{t_n+k} (t - t_n)^m dt = \sum_{m=0}^{r-1} a_m \frac{k^{m+1}}{m+1}.
\]
The quadrature formula \( I_s(f) \) is:
\[
    I_s(f) = k \sum_{j=1}^s b_j f(t_n + c_j k) = k \sum_{j=1}^s b_j \sum_{m=0}^{r-1} a_m (c_j k)^m = \sum_{m=0}^{r-1} a_m k^{m+1} \sum_{j=1}^s b_j c_j^m.
\]
By the \( B(r) \) condition, \( \sum_{j=1}^s b_j c_j^m = \frac{1}{m+1} \) for \( m = 0, 1, \ldots, r-1 \). Thus:
\[
    I_s(f) = \sum_{m=0}^{r-1} a_m \frac{k^{m+1}}{m+1} = I(f).
\]
Therefore, \( I_s(f) \) is exact for all \( f \in \mathbb{P}_{r-1} \).

\subsection{Prove $\Rightarrow$ : \( \mathbb{P}_{r-1} \) Implies \( B(r) \)}
Assume \( I_s(f) = I(f) \) for all \( f \in \mathbb{P}_{r-1} \). We show that the RK method is \( B(r) \).\\
For each \( l = 1, 2, \ldots, r \), consider the monomial \( f(t) = (t - t_n)^{l-1} \). Then:
\[
    I(f) = \int_{t_n}^{t_n+k} (t - t_n)^{l-1} dt = \frac{k^l}{l}.
\]
\[
    I_s(f) = k \sum_{j=1}^s b_j (c_j k)^{l-1} = k^l \sum_{j=1}^s b_j c_j^{l-1}.
\]
Since \( I_s(f) = I(f) \), we have:
\[
    k^l \sum_{j=1}^s b_j c_j^{l-1} = \frac{k^l}{l} \implies \sum_{j=1}^s b_j c_j^{l-1} = \frac{1}{l}.
\]
This holds for \( l = 1, 2, \ldots, r \), so the RK method is \( B(r) \).

\section{Exercise 11.59}
\begin{align*}
    &\mathbf{p}'(t_n+\tau k) = \sum_{j=1}^{s} \mathbf{f}(\mathbf{p}(t_n^{(j)}), t_n^{(j)})l_j(\tau),\\
    &\mathbf{u}'(t_n+\tau k) = \sum_{j=1}^{s} \mathbf{f}(\mathbf{u}(t_n^{(j)}), t_n^{(j)})l_j(\tau) + k^sE(\tau, k),
\end{align*}
where \(\|E(\tau, k)\| \leq 2 \max_{t\in [t_n,t_n + k]} \frac{\|\mathbf{u}^{(s+1)}(t)\|}{s!}\) bounds the interpolation error of \(\mathbf{u}'\) at the \(s\) collocation nodes.
Integrating both identities from $t_n$ to $t_n+\tau k$ and subtracting (note $\mathbf{p}(t_n) = \mathbf{u}(t_n)$ for the one-step error analysis), the error satisfies:
\begin{align*}
    \mathbf{u}(t_{n}+\tau k) - \mathbf{p}(t_{n}+\tau k) =  
    k\sum_{j=1}^{s}[\mathbf{f}(\mathbf{u}(t_n^{(j)}), t_n^{(j)})-\mathbf{f}(\mathbf{p}(t_n^{(j)}), t_n^{(j)})] \int_{0}^{\tau}l_j(\beta)d\beta + k^{s+1}\int_{0}^{\tau}E(\beta, k)d\beta.
\end{align*}
By the Lipschitz condition of $\mathbf{f}$, we can get:
\begin{align*}
    &\|\mathbf{u}(t_{n}+\tau k) - \mathbf{p}(t_{n}+\tau k)\| \leq kCL\max_{t\in [t_n,t_n + k]}\|\mathbf{u}(t) - \mathbf{p}(t)\| + \mathcal{O}(k^{s+1})\\
    \Rightarrow & \max_{t\in [t_n,t_n + k]}\|\mathbf{u}(t) - \mathbf{p}(t)\|\leq kCL\max_{t\in [t_n,t_n + k]}\|\mathbf{u}(t) - \mathbf{p}(t)\| + \mathcal{O}(k^{s+1})
\end{align*}
For $k$ small enough that $kCL \leq \frac{1}{2}$, this yields:
\begin{align*}
    \mathcal{L}\mathbf{u}(t_n)  = \|\mathbf{u}(t_{n+1}) - \mathbf{p}(t_{n+1})\| \leq C\cdot k^{s+1}.
\end{align*}
An $s$-stage collocation method constructs a polynomial of degree $s$ that matches the exact solution's behavior at the $s$ collocation nodes, ensuring that the one-step error is $\mathcal{O}(k^{s+1})$.


\section{Exercise 11.60}
\subsection{Prove (11.23)}
\begin{align*}
    \sum_{j=1}^s a_{i,j} &= \sum_{j=1}^s\int_0^{c_i} l_j(\tau) d\tau = \sum_{j=1}^{s} \int_0^{c_i} \prod_{\substack{m=1\\ m \neq j}}^s\frac{\tau - c_m}{c_j - c_m} d\tau\\
                         &= \int_0^{c_i} \sum_{j=1}^{s} \prod_{\substack{m=1\\ m \neq j}}^s\frac{\tau - c_m}{c_j - c_m} d\tau = \int_0^{c_i} 1 d\tau = c_i,
\end{align*}
since the Lagrange basis polynomials reproduce the constant function $1$ exactly.

\subsection{Prove (11.24)}
\begin{align*}
    \sum_{j=1}^s b_j &=  \sum_{j=1}^{s} \int_0^{1} l_j(\tau) d\tau = \int_0^{1}\sum_{j=1}^{s}l_j(\tau) d\tau = \int_0^{1} 1 d\tau = 1.
\end{align*}

\section{Exercise 11.62}
Let \(c_1 = \frac{1}{4}, c_2 = \frac{1}{2}, c_3 = \frac{3}{4}\), then the corresponding  elementary Lagrange interpolation polynomials are:
\begin{align*}
    l_1(\tau) &= \frac{(\tau - c_2)(\tau - c_3)}{(c_1 - c_2)(c_1 - c_3)} = (2\tau-1)(4\tau-3) = 8\tau^2 - 10\tau + 3,\\
    l_2(\tau) &= \frac{(\tau - c_1)(\tau - c_3)}{(c_2 - c_1)(c_2 - c_3)} = -(4\tau-1)(4\tau-3) = -16\tau^2 + 16\tau -3,\\
    l_3(\tau) &= \frac{(\tau - c_1)(\tau - c_2)}{(c_3 - c_1)(c_3 - c_2)} = (2\tau-1)(4\tau-1) = 8\tau^2 - 6\tau + 1.
\end{align*}
and (11.48) yields the $\mathbf{IRK}$ method with Butcher tableau:
\[
    \begin{array}{c|ccc}
        \frac{1}{4} & \frac{23}{48} & -\frac{1}{3} & \frac{5}{48} \\
        \frac{1}{2} & \frac{7}{12} & -\frac{1}{6} & \frac{1}{12} \\
        \frac{3}{4} & \frac{9}{16} & 0 & \frac{3}{16} \\
        \hline
        & \frac{2}{3} & -\frac{1}{3} & \frac{2}{3}
    \end{array}
\]

\section{Exercise 11.65}
For distinct nodes \( c_1, c_2, \ldots, c_s \), the Vandermonde matrix \( V \) is invertible, where:
\[
   V = \begin{bmatrix}
   1 & c_1 & \cdots & c_1^{s-1} \\
   1 & c_2 & \cdots & c_2^{s-1} \\
   \vdots & \vdots & \ddots & \vdots \\
   1 & c_s & \cdots & c_s^{s-1}
   \end{bmatrix}.
   V^{T} = \begin{bmatrix}
    1 & 1 & \cdots & 1 \\
    c_1 & c_2 & \cdots & c_s \\
    \vdots & \vdots & \ddots & \vdots \\
    c_1^{s-1} & c_2^{s-1} & \cdots & c_s^{s-1}
    \end{bmatrix}.
\]
\subsection{Compute \( V^T u \):}
Fix \( m \in \{1, 2, \ldots, r\} \) and define \( u_j = \sum_{i=1}^s b_i c_i^{m-1} a_{i,j} \) and \( v_j = \frac{b_j}{m}(1 - c_j^m) \) for \( j = 1, \ldots, s \). Then:
\begin{align*}
    (V^T u)_k = \sum_{j=1}^s c_j^{k-1} u_j &= \sum_{j=1}^s c_j^{k-1} \sum_{i=1}^s b_i c_i^{m-1} a_{i,j}\\
                                           &= \sum_{i=1}^s b_i c_i^{m-1} \sum_{j=1}^s a_{i,j} c_j^{k-1}.
\end{align*}
By the \( C(s) \) condition (\(\sum_{j=1}^s a_{i,j} c_j^{k-1} = \frac{c_i^k}{k} \quad \text{for} \quad k = 1, 2, \ldots, s. \)), this simplifies to:
\[
   (V^T u)_k = \sum_{i=1}^s b_i c_i^{m-1}\frac{c_i^k}{k} .
\]
By the \( B(s + r) \) condition:\( \sum_{i=1}^s b_i c_i^{m+k-1} = \frac{1}{m+k} \) (for \( m+k \leq s + r \)), this implies:
\[
    (V^T u)_k = \frac{1}{k(m+k)}.
\]
\subsection{Compute \(V^T v \):}
\begin{align*}
    (V^T v)_k &= \sum_{j=1}^s c_j^{k-1} v_j = \frac{1}{m} \sum_{j=1}^s b_j (1 - c_j^m) c_j^{k-1}\\
        &= \frac{1}{m} \left( \sum_{j=1}^s b_j c_j^{k-1} - \sum_{j=1}^s b_j c_j^m c_j^{k-1} \right).
\end{align*}
By the \( B(s + r) \) condition:
\begin{enumerate}
    \item \( \sum_{j=1}^s b_j c_j^{k-1} = \frac{1}{k} \) (for \( k \leq s + r \)).
    \item \( \sum_{j=1}^s b_j c_j^m c_j^{k-1} = \frac{1}{m+k} \) (for \( m+k \leq s + r \)).
\end{enumerate}
Thus:
\[
(V^T v)_k = \frac{1}{m} \left( \frac{1}{k} - \frac{1}{m + k} \right) = \frac{1}{k(m + k)}.
\]
As a result, \((V^T u)_k = (V^T v)_k\) for \( k = 1, 2, \ldots, s \). This implies that \( V^T u = V^T v \). Since \(V^T\) is invertible, we have \( u = v \).\\
The equality \( u_j = v_j \) for all \( j \) and \( m = 1, 2, \ldots, r \) directly implies the \( D(r) \) condition:
\[
\sum_{i=1}^s b_i c_i^{m-1} a_{i,j} = \frac{b_j}{m} (1 - c_j^m).
\]
Thus, under the assumptions \( B(s + r) \), \( C(s) \), and distinct nodes, the \( D(r) \) condition holds.

\section{Exercise 11.69}
The $\mathbf{IRK}$ method with Butcher tableau:
\[
    \begin{array}{c|ccc}
        \frac{1}{4} & \frac{23}{48} & -\frac{1}{3} & \frac{5}{48} \\
        \frac{1}{2} & \frac{7}{12} & -\frac{1}{6} & \frac{1}{12} \\
        \frac{3}{4} & \frac{9}{16} & 0 & \frac{3}{16} \\
        \hline
        & \frac{2}{3} & -\frac{1}{3} & \frac{2}{3}
    \end{array}
\]
\subsection{Compute \(B(p)\):}
\begin{align*}
    \sum_{j=1}^3 b_j c_j^{1-1} &= \sum_{j=1}^3 b_j = 1,\\
    \sum_{j=1}^3 b_j c_j^{2-1} &= \sum_{j=1}^3 b_j c_j = \frac{1}{6} - \frac{1}{6} + \frac{1}{2} = \frac{1}{2},\\
    \sum_{j=1}^3 b_j c_j^{3-1} &= \sum_{j=1}^3 b_j c_j^2 = \frac{1}{24} - \frac{1}{12} + \frac{3}{8} = \frac{1}{3},\\
    \sum_{j=1}^3 b_j c_j^{4-1} &= \sum_{j=1}^3 b_j c_j^3 = \frac{1}{96} - \frac{1}{24} + \frac{9}{32} = \frac{1}{4},\\
    \sum_{j=1}^3 b_j c_j^{5-1} &= \sum_{j=1}^3 b_j c_j^4 = \frac{37}{192} \neq \frac{1}{5}.
\end{align*}
As a result, \(p = 4\).
\subsection{Compute \(C(\eta)\):}
\begin{align*}
    \sum_{j=1}^{3} a_{i,j} c_j^{1-1} &= \sum_{j=1}^{3} a_{i,j} = c_i,\\
    \sum_{j=1}^{3} a_{1,j} c_j^{2-1} &= \sum_{j=1}^{3} a_{1,j} c_j = \frac{1}{32} = \frac{c_1^2}{2},\\
    \sum_{j=1}^{3} a_{2,j} c_j^{2-1} &= \sum_{j=1}^{3} a_{2,j} c_j = \frac{1}{8} = \frac{c_2^2}{2},\\
    \sum_{j=1}^{3} a_{3,j} c_j^{2-1} &= \sum_{j=1}^{3} a_{3,j} c_j = \frac{9}{32} = \frac{c_3^2}{2}.
\end{align*}
\begin{align*}
    \sum_{j=1}^{3} a_{1,j} c_j^{3-1} &= \sum_{j=1}^{3} a_{1,j} c_j^2 = \frac{1}{192} = \frac{c_1^3}{3},\\
    \sum_{j=1}^{3} a_{2,j} c_j^{3-1} &= \sum_{j=1}^{3} a_{2,j} c_j^2 = \frac{1}{24} = \frac{c_2^3}{3},\\
    \sum_{j=1}^{3} a_{3,j} c_j^{3-1} &= \sum_{j=1}^{3} a_{3,j} c_j^2 = \frac{9}{64} = \frac{c_3^3}{3}.
\end{align*}
Thus \(C(3)\) is satisfied.

\subsection{Compute \(D(\zeta)\):}
\begin{align*}
    \sum_{j=1}^{3} b_j c_j^{1-1} a_{j,1} &= \sum_{j=1}^{3} b_j a_{j,1} = \frac{1}{2} = b_1(1-c_1),\\
    \sum_{j=1}^{3} b_j c_j^{1-1} a_{j,2} &= \sum_{j=1}^{3} b_j a_{j,2} = -\frac{1}{6} = b_2(1-c_2),\\
    \sum_{j=1}^{3} b_j c_j^{1-1} a_{j,3} &= \sum_{j=1}^{3} b_j a_{j,3} = \frac{1}{6} = b_3(1-c_3).
\end{align*}
Thus \(\zeta = 1\).\\
Applying Lemma 11.66 with \(p=4, \eta = 3, \zeta = 1\), the order of accuracy is at least \(\min(p,\, 2\eta+2,\, \eta+\zeta+1) = \min(4,\, 8,\, 5) = \boxed{4}\).

\end{document}
