\documentclass{ctexart}
\usepackage{listings}%插入代码
\usepackage{geometry}%设置页面大小边距等
\usepackage{graphicx}%插入图片
\usepackage{amssymb}%为了用\mathbb
\usepackage{amsmath}%数学方程的显示
\usepackage{fancyhdr}%设置页眉页脚
\usepackage{lastpage}%总页数
\usepackage{hyperref}%引用网页
\usepackage{xcolor}
\usepackage{tikz}
\usepackage{float}
\usepackage{subcaption}
\usepackage{mathrsfs}

\lstset{
    basicstyle=\ttfamily, basewidth=0.5em
}


\geometry{a4paper,left=2cm,right=2cm,top=2cm,bottom=2cm}%一定要放在前面！
\pagestyle{fancy}%设置页眉页脚
\lhead{陈冠宇\ 3200102033}%页眉左
\chead{Numerical Methods for Differential Equations}%页眉中
\rhead{HW2}%章节信息
\cfoot{\thepage/\pageref{LastPage}}%当前页，记得调用前文提到的宏包
\lfoot{Zhejiang University}
\rfoot{School of Mathematical Sciences}
\renewcommand{\headrulewidth}{0.1mm}%页眉线宽，设为0可以去页眉线
\renewcommand{\footrulewidth}{0.1mm}%页脚线宽，设为0可以去页眉线
\setlength{\headwidth}{\textwidth}

\hypersetup{%设置网页链接颜色等
    colorlinks=true,%链接将会有颜色，默认是红色
    linkcolor=blue,%内部链接，那些由交叉引用生成的链接将会变为蓝色（blue）
    filecolor=magenta,%链接到本地文件的链接将会变为洋红色（magenta）
    urlcolor=blue,%链接到网站的链接将会变为蓝绿色（cyan）
    }

\newtheorem{theorem}{Theorem}
\newtheorem{proof}{Proof}
\newtheorem{solution}{Solution:}

\title{\textbf{微分方程数值解第一次作业}}
\date{\today}

\begin{document}
\subsection*{\uppercase\expandafter{\romannumeral 1} Exercise 10.9}
\begin{proof}
    1)real positivity:
    Since $||T||:=\inf\{M\geq 0:\forall x\in \mathbb{F}^n, ||Tx||\leq M||x||\}$, then we have $\forall T, ||T||\geq 0$.

    2)point separation:
    If $||T||=0$, then $\forall x\in \mathbb{F}^n, ||Tx|| \leq 0$, that is $||Tx|| \equiv 0$, i.e. $T = 0$.

    3)absolute homogeneity:
    $\forall a\in\mathbb{F},\ \forall T$, $||aT||:=\inf\{M\geq 0:\forall x\in \mathbb{F}^n, ||aTx||\leq M||x||\}$. Since $||aTx|| = |a|\,||Tx||$, this infimum equals $|a|\,||T||$, i.e. $||aT|| = |a|\,||T||$.

    4)triangle inequality:
    $$\forall T_1, T_2,\quad ||T_1+T_2|| = \inf\{M\geq 0:\forall x\in \mathbb{F}^n, ||(T_1+T_2)x||\leq M||x||\}$$
    and for every $x\in \mathbb{F}^n$
    $$||T_1x|| + ||T_2x||\leq ||T_1||\,||x||+||T_2||\,||x|| = \left(||T_1|| + ||T_2||\right)||x||.$$

    Since $||(T_1+T_2)x||\leq ||T_1x|| + ||T_2x||$, then $||T_1+ T_2|| \leq ||T_1|| + ||T_2||$, then $||.||$ is a norm.
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 2} Exercise 10.13}
\begin{proof}
    We have defined the metric as $d(T,S) = ||T-S||$ in the space $\mathbb{L} (\mathbb{F}^n, \mathbb{F}^m)$. Then we prove $(\mathbb{L}, d)$ is a metric space.

    1) By the last Exercise, we have the real positivity.

    2) If $d(T, S) = 0$, i.e. $||T-S|| = 0$, by last Exercise, we have $T - S=0$, that is $T = S$.

    3) Clearly, $d(T,S) = d(S,T)$

    4) $d(T, S) = ||T - S|| = ||T - H + H - S||\leq ||T - H|| + ||H - S|| = d(T, H) + d(H, S)$.

    Then $(\mathbb{L}, d)$ is a metric space.
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 3} Exercise 10.16}
\begin{proof}
    1)real positivity:
    Since $|T|:= \left(\sum_{j = 1}^{n}||Te_j||^2\right)^\frac{1}{2}$, then we have $\forall T, |T|\geq 0$.

    2)point separation:
    If $|T|=0$, then $\left(\sum_{j = 1}^{n}||Te_j||^2\right)^\frac{1}{2} = 0$, that is $\forall j, ||Te_j|| = 0$, i.e. $T = 0$.

    3)absolute homogeneity:
    $$\forall a\in\mathbb{F},\ \forall T, |aT|:=\left(\sum_{j = 1}^{n}||aTe_j||^2\right)^\frac{1}{2} = \left(|a|^2\sum_{j = 1}^{n}||Te_j||^2\right)^\frac{1}{2} = |a|\left(\sum_{j = 1}^{n}||Te_j||^2\right)^\frac{1}{2} = |a|\,|T|$$

    4)triangle inequality:
    $$\forall T_1, T_2,\begin{aligned}
        |T_1+ T_2| &= \left(\sum_{j = 1}^{n}||(T_1 + T_2)e_j||^2\right)^\frac{1}{2}\\
        &\leq \left(\sum_{j = 1}^{n}\left(||T_1e_j|| + ||T_2e_j||\right)^2\right)^\frac{1}{2}\\
        &\leq \left(\sum_{j = 1}^{n}||T_1e_j||^2\right)^\frac{1}{2} + \left(\sum_{j = 1}^{n}||T_2e_j||^2\right)^\frac{1}{2}\qquad\text{(Minkowski)}\\
        &=|T_1| + |T_2|
    \end{aligned}$$

    Then $|.|$ is a norm.
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 4} Exercise 10.20}
\begin{proof}
    By Corollary 10.18, we have
    $$\begin{aligned}
        |TS| &= \left(\sum_{j = 1}^{n}||TSe_j||^2\right)^\frac{1}{2}\\
        &\leq \left(\sum_{j = 1}^{n}|T|^2\, ||Se_j||^2\right)^\frac{1}{2}\\
        &= |T|\left(\sum_{j = 1}^{n}||Se_j||^2\right)^\frac{1}{2} = |T|\,|S|
    \end{aligned}$$

    That is $|TS| \leq |T||S|$.
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 5} Exercise 10.36}
\begin{proof}
    First suppose $X$ is diagonalizable: $X = PDP^{-1}$ with $D = \operatorname{diag}(\lambda_1,\dots,\lambda_n)$ and $P$ invertible. Since $X^N = PD^NP^{-1}$ for every $N$, we have $e^X = Pe^DP^{-1}$.

    Then
    $$
    \begin{aligned}
        \det e^X &= \det\left(Pe^DP^{-1}\right) = \det e^D\\
        & = \prod_{i=1}^{n} e^{\lambda_i} = e^{\sum_{i=1}^{n}\lambda_i}\\
        & = e^{\operatorname{Trace}(D)} = e^{\operatorname{Trace}(P^{-1}PD)} = e^{\operatorname{Trace}(PDP^{-1})}\\
        & = e^{\operatorname{Trace}(X)}
    \end{aligned}$$
    The general case follows since diagonalizable matrices are dense in the space of matrices and both sides are continuous in $X$.
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 6} Exercise 10.50}
\begin{proof}
    The two IVPs are respectively solved by
    $$\begin{aligned}
        v(t) &= v_0 + \int_a^t f(v(s), s)ds\\
        w(t) &= w_0 + \int_a^t f(w(s), s)ds\\
    \end{aligned}$$

    Thus we have
    $$\begin{aligned}
    ||v(t) - w(t)|| &= ||v_0-w_0 + \int_a^t (f(v(s), s) - f(w(s), s))ds||\\
    &\leq ||v_0 - w_0|| + \int_a^t ||f(v(s), s) - f(w(s), s)||ds\\
    &\leq ||v_0 - w_0|| + L\int_a^t ||v(s) - w(s)||ds
    \end{aligned}$$

    That is
    $$\begin{aligned}
        ||v(t) - w(t)||- L\int_a^t ||v(s) - w(s)||ds &\leq ||v_0 - w_0||\\
        \left(e^{-tL} \int_a^t ||v(s) - w(s)||ds\right)' &\leq e^{-tL}||v_0 - w_0||
    \end{aligned}$$

    Then $$
    \begin{aligned}
        e^{-tL} \int_a^t ||v(s) - w(s)||ds &\leq  ||v_0 - w_0|| \left.\frac{-e^{-sL}}{L}\right|_a^t = ||v_0 - w_0||\frac{e^{-aL}-e^{-tL}}{L}\\
        \int_a^t ||v(s) - w(s)||ds &\leq ||v_0 - w_0||\frac{e^{(t-a)L} - 1}{L}\\
    \end{aligned}$$

    which after differentiating gives $$||v(t) - w(t)|| \leq ||v_0 - w_0|| e^{(t-a)L}$$

\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 7} Exercise 10.100}
\begin{proof}
    For the trapezoidal rule, we have a 1-step Adams-Moulton formula with $s=1, \alpha_1 = 1, \alpha_0 = -1, \beta_1 = \beta_0 = \frac{1}{2}.$
Hence, by lemma 10.95,
$$\begin{aligned}
    C_0 &= \alpha_0 + \alpha_1 = 0;\\
    C_1 &= \sum_{j = 0}^{s}(j\alpha_j-\beta_j) = -\beta_1+(\alpha_1 - \beta_1) = 0;\\
    C_2 &= \sum_{j=0}^{s}(\frac{1}{2}j^2\alpha_j-j\beta_j) = 0;\\
    C_3 &= \sum_{j=0}^{s}(\frac{1}{3!}j^3\alpha_j-\frac{1}{2!}j^2\beta_j) = -\frac{1}{12};\\
    C_4 &= \sum_{j = 0}^{s}(\frac{1}{4!}j^4\alpha_j - \frac{1}{3!}j^3\beta_j) = - \frac{1}{24};
\end{aligned}$$

    For the midpoint method, we have a 2-step Nystrom formula with $s = 2, \alpha_2 = 1, \alpha_1 = 0, \alpha_0 = -1, \beta_1 = 2,\beta_0 = 0.$
Similarly, we have
$$\begin{aligned}
    C_0 &= \sum_{j = 0}^{s} \alpha_j= 0;\\
    C_1 &= -\beta_0 + \alpha_1 - \beta_1 +2\alpha_2 = 0;\\
    C_2 &= \frac{1}{2}\alpha_1 - \beta_1 +2\alpha_2 - 2\beta_2= 0;\\
    C_3 &= \frac{1}{6}\alpha_1 - \frac{1}{2}\beta_1+\frac{4}{3}\alpha_2-2\beta_2 = \frac{1}{3};\\
    C_4 &= \frac{1}{24}\alpha_1-\frac{1}{6}\beta_1+\frac{2}{3}\alpha_2 = \frac{1}{3};
\end{aligned}$$
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 8} Exercise 10.102}
\begin{proof}
    If $||\mathscr{L}u(t_n)|| = O(k^3)$, then we have $C_0 = C_1 = C_2 = 0$, i.e.
    $$\sum_{j = 0}^{s} \alpha_j = 0,\qquad \sum_{j = 0}^{s}(j\alpha_j-\beta_j) = 0, \qquad \sum_{j=0}^{s}(\frac{1}{2}j^2\alpha_j-j\beta_j) = 0$$

    That is $\rho(1) = 0,\qquad \rho'(1) = \sigma(1),\qquad \rho'(1)+\rho''(1) = 2\sigma'(1).$
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 9} Exercise 10.103}
\begin{proof}
    Use C++. We have written \texttt{fraction.h} to compute fractions, and do Gaussian Elimination in \texttt{compute.cpp}.

    Input \texttt{g++ compute.cpp} and then \texttt{./a.out}, we have
\begin{lstlisting}
    --------------------Adams-Bashforth formulas--------------------
    s = 1   p = 1   1
    s = 2   p = 2   3/2     1/-2
    s = 3   p = 3   23/12   4/-3    5/12
    s = 4   p = 4   55/24   59/-24  37/24   3/-8
    --------------------Adams-Moulton formulas--------------------
    s = 1   p = 2   1/2     1/2
    s = 2   p = 3   5/12    2/3     1/-12
    s = 3   p = 4   3/8     19/24   5/-24   1/24
    s = 4   p = 5   251/720 323/360 11/-30  53/360  -19/720
    --------------------Backward differentiation formulas--------------------
    s = 1   p = 1   1       1       -1
    s = 2   p = 2   2/3     1       4/-3    1
    s = 3   p = 3   6/11    1       -18/11  9/11    -8/11
    s = 4   p = 4   12/25   1       48/-25  36/25   16/-25  129/275
\end{lstlisting}

\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 10} Exercise 10.108}
\begin{proof}
    By Ex10.103, when $s = 3, p = 3$ we have the characteristic polynomial
    $$\left\{\begin{aligned}
        \rho (\zeta) &= \zeta^3-\frac{18}{11}\zeta^2+\frac{9}{11}\zeta-\frac{2}{11} \\
        \sigma(\zeta) &= \frac{6}{11}\zeta^3
    \end{aligned} \right.$$
    Then we compute the $\frac{\rho(\zeta)}{\sigma(\zeta)}-\log(\zeta)$ as $\zeta \rightarrow 1$

    $$\begin{aligned}
        \lim_{\zeta \rightarrow 1}\frac{\frac{\rho(\zeta)}{\sigma(\zeta)}-\log(\zeta)}{(\zeta - 1)^4} &= \frac{\zeta^{-4}-\zeta^{-3}+3\zeta^{-2}-\zeta^{-1}}{4(\zeta - 1)^3}\\
        &=\frac{-4\zeta^{-5}+9\zeta^{-4}-6\zeta^{-3}+\zeta}{12(\zeta - 1)^2}=\cdots\\
        & = \frac{-120\zeta^{-7}+180\zeta^{-6}-72\zeta^{-5}+6\zeta^{-4}}{24} = -\frac{1}{4}
    \end{aligned}$$

    That is the order of accuracy is 3.

\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 11} Exercise 10.109}(参考黄文翀)
\begin{proof}
    $"\Rightarrow"$ Suppose that an  s -step LMM has order of accuracy  p . By lemma 11.95 we have:

    $$\mathcal{L} \mathbf{u}\left(t_{n}\right)=\sum_{n=0}^{\infty} C_{n} k^{n} \frac{\mathrm{d}^{n} \mathbf{u}}{\mathrm{d} t^{n}}\left(t_{n}\right)$$
    
    Since it has order of accuracy  p, by definition 11.96 we have  $C_{0}=C_{1}=\cdots=C_{p}=0 $. If  $\mathbf{u}_{t}$  is a polynomial of degree  $<p$ , then  $\frac{\mathrm{d}^{n} \mathbf{u}}{\mathrm{d} t^{n}} \equiv 0$  for any  $n \geq p+1 $. 
    Hence  $\mathcal{L} \mathbf{u} \equiv 0 $. However for  $\mathbf{u}_{t}$  of degree  $p$, 
    $$\mathcal{L} \mathbf{u}=C_{p+1} k^{p+1} \frac{\mathrm{d}^{p+1} \mathbf{u}}{\mathrm{d} t^{p+1}} \not \equiv 0 $$
    $"\Leftarrow"$ Apply induction to prove  $C_{0}=\cdots=C_{p}=0 $, and finally prove $ C_{p+1} \neq 0 $. Take $ \mathbf{u}(t)=c \in \mathbb{C} $, then  $\mathbf{u}_{t} $ is polynomial of degree  <p , hence
    
    $$\mathcal{L} \mathbf{u} \equiv 0 \Longrightarrow C_{0} c=0(\forall c \in \mathbb{C}) \Longrightarrow C_{0}=0$$
    
    Now suppose we have proved  $C_{0}=\cdots=C_{m-1}=0$  for some  $m \leq p$ . Take $ \mathbf{u}(t)=\frac{c t^{m}}{m !}, c \in \mathbb{C}$ , then  $\mathbf{u}_{t} $ is polynomial of degree  <p , hence
    
    $$\mathcal{L} \mathbf{u}=C_{m} k^{m} \frac{\mathrm{d}^{m} \mathbf{u}}{\mathrm{d} t^{m}} \equiv 0 \Longrightarrow C_{m} k^{m} c=0(\forall c \in \mathbb{C}) \Longrightarrow C_{m}=0$$
    
    Hence we proved  $C_{0}=C_{1}=\cdots=C_{p}=0 $ by induction. Now take $ \mathbf{u}(t)=\frac{t^{p+1}}{(p+1) !}$ , then  $\mathbf{u}_{t} $ is polynomial of degree  p , hence
    
    $$\mathcal{L} \mathbf{u}=C_{p+1} k^{p+1} \frac{\mathrm{d}^{p+1} \mathbf{u}}{\mathrm{d} t^{p+1}} \not \equiv 0 \Longrightarrow C_{p+1} k^{p+1} \neq 0 \Longrightarrow C_{p+1} \neq 0$$
    
    Hence by the definition, the LMM has order of accuracy  p .
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 12} Exercise 10.113}
\begin{proof}
    $$\begin{aligned}
        &\det\begin{pmatrix}
           z & -1 &  & \\
            & z &  & \\
           \vdots & \vdots & \ddots & -1 \\
           a_0 & a_1 & \cdots & z+a_{s-1}
          \end{pmatrix} \\
          &= \det\begin{pmatrix}
           z & 0 &  & \\
            & z &  & \\
           \vdots & \vdots & \ddots & -1 \\
           a_0 & a_1+a_0z^{-1} & \cdots & z+a_{s-1}
          \end{pmatrix}\\
        &=\det\begin{pmatrix}
           z & 0 & 0 & 0 &\cdots& 0\\
            0 & z & 0 &0 &\cdots & 0\\
            0 & 0 & z & -1 &\cdots & 0\\
           \vdots & \vdots &\vdots &\vdots & \ddots & \vdots \\
           a_0 & a_1+a_0z^{-1}& a_2 + a_1z^{-1}+a_0z^{-2} & a_3&\cdots & z+a_{s-1}
          \end{pmatrix}\\
        &=\cdots\\
        &=\det\begin{pmatrix}
            z & 0 & 0 & 0 &\cdots& 0\\
            0 & z & 0 &0 &\cdots & 0\\
            0 & 0 & z & 0 &\cdots & 0\\
           \vdots & \vdots &\vdots &\vdots & \ddots & \vdots \\
           a_0 & a_1+a_0z^{-1}& a_2 + a_1z^{-1}+a_0z^{-2} & a_3+a_2z^{-1} + a_1z^{-2}+a_0z^{-3}&\cdots & z+\sum_{i = 0}^{s-1}a_iz^{i-s+1}
        \end{pmatrix}\\
        &=z^s+\sum_{i=0}^{s-1}a_iz^i
    \end{aligned}$$
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 13} Exercise 10.119}
\begin{proof}
    When $n=0\cdots s-1$, we need to prove that $y_n = \sum_{i = 0}^{s-1}\theta_{n-i}\tilde{y_i}$

    Since we have(10.75).

    That is
    $$\left[\begin{array}{c}
        y_{s-1} \\
        y_{s-2} \\
        y_{s-3} \\
        \vdots \\
        y_{1} \\
        y_{0}
        \end{array}\right]=\left[\begin{array}{cccccc}
            1 & \theta_{1} & \theta_{2} & \cdots & \theta_{s-2} & \theta_{s-1} \\
            0 & 1 & \theta_{1} & \cdots & \theta_{s-3} & \theta_{s-2} \\
            0 & 0 & 1 & \cdots & \theta_{s-4} & \theta_{s-3} \\
            \vdots & \vdots & \vdots & & \vdots & \vdots \\
            0 & 0 & 0 & \cdots & 1 & \theta_{1} \\
            0 & 0 & 0 & \cdots & 0 & 1
            \end{array}\right] \left[\begin{array}{c}
                \tilde{y}_{s-1} \\
                \tilde{y}_{s-2} \\
                \tilde{y}_{s-3} \\
                \vdots \\
                \tilde{y}_{1} \\
                \tilde{y}_{0}
                \end{array}\right].$$
    i.e.$y_n = \sum_{i = 0}^{s-1}\theta_{n-i}\tilde{y_i}$ holds.

Then when $n>s-1$. Assume $y_n = \sum_{i = 0}^{s-1}\theta_{n-i}\tilde{y_i}+\sum_{i=s}^{n}\theta_{n-i}\psi_i$ holds for $n = m +s-1$, then we have to prove it holds for $n = m + s$.

Since we have $y_{n+s} = \psi_{n+s} - \sum_{i=0}^{s-1}\alpha_iy_{n+i}$, then we have
$$\begin{aligned}
    y_{m+s} &= \psi_{m+s} - \sum_{i=0}^{s-1}\alpha_iy_{m+i}\\
    &=\psi_{m+s} - \sum_{i=0}^{s-1}\alpha_i(\sum_{j = 0}^{s-1}\theta_{m+i-j}\tilde{y_j}+\sum_{j=s}^{m+i}\theta_{m+i-j}\psi_j)\\
    &=\psi_{m+s} - \sum_{j=0}^{s-1}\tilde{y_j}\sum_{i=0}^{s-1}\alpha_i\theta_{m+i-j}-\sum_{j=s}^{m+s-1}\psi_j\sum_{i=0}^{s-1}\alpha_i\theta_{m+i-j}\\
    &=\psi_{m+s} + \sum_{j=0}^{s-1}\tilde{y_j}\theta_{m+s-j} + \sum_{j=s}^{m+s-1}\psi_j\theta_{m+s-j}\\
    &=\sum_{j=0}^{s-1}\tilde{y_j}\theta_{m+s-j}+\sum_{j=s}^{m+s}\psi_j\theta_{m+s-j}
\end{aligned}
$$
which completes the induction.
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 14} Exercise 10.124}(buhui)
\begin{proof}
    
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 15} Exercise 10.141}
\begin{proof}
    To generate the Adams-Bashforth formulas
\begin{lstlisting}
theta = linspace(0, 2*pi, 1000);
%p = 1
z = exp(1i*theta) - 1;
plot(real(z), imag(z));
%p = 2
hold on
z = 2 * (exp(2*1i*theta) - exp(1i*theta))./(3*exp(1i*theta) - 1);
plot(real(z), imag(z));
%p = 3
hold on
z = 12*(exp(3*1i*theta) - exp(2*1i*theta))./(23*exp(2*1i*theta) - 16*exp(1i*theta)+ 5) ;
plot(real(z), imag(z));

%p = 4
z = 24*(exp(4*1i*theta) - exp(3*1i*theta))./(55*exp(3*1i*theta) - 59*exp(2*1i*theta) + 37*exp(1i*theta) - 9) ;
plot(real(z), imag(z));
\end{lstlisting}

And the result is
\begin{figure}[H]
  \centering
  \begin{subfigure}[b]{0.4\textwidth}
    \includegraphics[width=\textwidth]{../pic/adamsbashforth.png}
    \caption{p = 1,2,3}
  \end{subfigure}
  \begin{subfigure}[b]{0.4\textwidth}
    \includegraphics[width=\textwidth]{../pic/adamsbashforth4.png}
    \caption{p = 4}
  \end{subfigure}
  \caption{Adams-Bashforth formula}
\end{figure}

 To generate the Adams-Moulton formulas
\begin{lstlisting}
theta = linspace(0, 2*pi, 1000);
z = 12 * (exp(2 * 1i*theta) - exp(1i*theta))./(5*exp(2*1i*theta)+8*exp(1i*theta) - 1);
plot(real(z), imag(z));

hold on
z = 24 * (exp(3 * 1i*theta) - exp(2*1i*theta))./(9*exp(3*1i*theta)+19*exp(2*1i*theta)-5*exp(1i*theta) + 1);
plot(real(z), imag(z));

hold on
z = 720 * (exp(4 * 1i*theta) - exp(3*1i*theta))./(251*exp(4*1i*theta)+646*exp(3*1i*theta)-264*exp(2*1i*theta)+106*exp(1i*theta) -19);
plot(real(z), imag(z));
\end{lstlisting}

And the result is
\begin{figure}[H]
  \centering
    \includegraphics[width=0.5\textwidth]{../pic/adamsmoulton.png}
    \caption{p = 3,4,5 Adams-Moulton formula}
\end{figure}

 To generate the backward differentiation formulas
\begin{lstlisting}
theta = linspace(0, 2*pi, 1000);
z = (exp(1i*theta) - 1)./exp(1i*theta);
plot(real(z), imag(z));

hold on
z =  (3*exp(2 * 1i*theta) - 4*exp(1i*theta)+1)./(2*exp(2*1i*theta));
plot(real(z), imag(z));

hold on
z = (11*exp(3 * 1i*theta) - 18*exp(2 * 1i*theta)+9*exp(1i*theta)-2)./(6*exp(3*1i*theta));
plot(real(z), imag(z));

hold on
z = (25*exp(4 * 1i*theta) - 48*exp(3 * 1i*theta) +36*exp(2 * 1i*theta)-16*exp(1i*theta)+3)./(12*exp(4*1i*theta));
plot(real(z), imag(z));
\end{lstlisting}

And the result is
\begin{figure}[H]
  \centering
    \includegraphics[width=0.5\textwidth]{../pic/backforth.png}
    \caption{p = 1,2,3,4 backward differentiation formula}
\end{figure}


\end{proof}


\subsection*{\uppercase\expandafter{\romannumeral 16} Exercise 10.159}
\begin{proof}
    Take the modified Euler method for an example. Since the red line stands for 
    $$u_3(t_{n+1})  - u(t_{n+1})+u(t_n)-U^n= U^n+ky_2- u(t_{n+1})+u(t_n)-U^n=ky_2- u(t_{n+1})+u(t_n)$$
    where $y_2=f(U^n,t_n;k)$ as one-step error defined in 10.162
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 17} Exercise 10.161}
\begin{proof}
 As the definition of 160, we have 
 $$\left\{\begin{aligned}
     U^* & = U^n+\frac{k}{4}(f(U^n, t_n)+f(U^*,t_n+\frac{k}{2})) \\
     U^{n+1} & = \frac{1}{3}(4U^*-U^n+kf(U^{n+1} ,t_{n+1}))
   \end{aligned}\right.$$
 
 Then it can be written as the form
 $$\left\{
 \begin{aligned}
 y_1&=f(U^n, t_n)\\
 y_2 &= f(U^*, t_n + \frac{k}{2}),\ \text{where } U^* = U^n+\frac{k}{4}(y_1+y_2)\\
 y_3 &= f(U^n + \frac{k}{3}(y_1 + y_2+ y_3), t_n+k)\\
 U^{n+1} &= U^n + \frac{k}{3}(y_1+y_2 +y_3)
 \end{aligned}\right.$$
Then we have $l_1:u_1(t) = U^n + y_1(t-t_n)$. And $l_2$ passes through $(u_1(t_n+\frac{k}{2}), t_n + \frac{k}{2})$ with slope $y_2$. 

Similarly, $u_3 = U^n + \frac{k}{2}y_1+\frac{k}{2}y_2 + y_3(t-t_n-k)$.But I can't figure out how to draw the picture.
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 18} Exercise 10.165}
\begin{proof}
  $$\begin{aligned}
  \mathscr{L}u(t_n) &= u(t_{n+1}) - u(t_{n-1}) - 2kf(u(t_n), t_n)\\
  &= 2ku'(t_n) + \frac{k^3}{6}u'''(t_n) - 2ku'(t_n)\\
  &= \frac{k^3}{6}u'''(t_n) = \Theta(k^3)
  \end{aligned}$$
  i.e. the explicit midpoint method is second-order accurate by definition 162.
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 19} Exercise 10.174*}
\begin{proof}
    (1)The TR-BDF2 method has the form of

    $$\left\{\begin{array}{l}
    \mathbf{U}^{*}=\mathbf{U}^{n}+\frac{k}{4}\left(\mathbf{f}\left(\mathbf{U}^{n}, \iota_{n}\right)+\mathbf{f}\left(\mathbf{U}^{*}, \iota_{n}+\frac{k}{2}\right)\right) \\
    \mathbf{U}^{n+1}=\frac{1}{3}\left(4 \mathbf{U}^{*}-\mathbf{U}^{n}+k \mathbf{f}\left(\mathbf{U}^{n+1}, \iota_{n+1}\right)\right)
    \end{array}\right.$$
    
    Then we have
    
    $$\left\{\begin{array}{l}
    \mathbf{U}^{*}=\mathbf{U}^{n}+\frac{k}{4}\left(\lambda \mathbf{U}^{n}+\lambda \mathbf{U}^{*}\right) \\
    \mathbf{U}^{n+1}=\frac{4}{3} \mathbf{U}^{*}-\frac{1}{3} \mathbf{U}^{n}+\frac{k}{3} \lambda \mathbf{U}^{n+1}
    \end{array}\right.$$
    
    and we substitute the first into the second,we get
    
    $$\left(1-\frac{1}{3} z\right) \mathbf{U}^{n+1}=\left(\frac{4}{3} \frac{1+\frac{z}{4}}{1-\frac{z}{4}}-\frac{1}{3}\right) \mathbf{U}^{n}$$
    
    where  $z=k \lambda$  So
    
    $$R(z)=\frac{\left(\frac{4}{3} \frac{1+\frac{z}{4}}{1-\frac{1}{4}}-\frac{1}{3}\right)}{\left(1-\frac{1}{3} z\right)}=\frac{1+\frac{5}{12} z}{1-\frac{7}{12} z+\frac{1}{12} z^{2}}$$
    
    (2)We use the Taylor expansion and get
    
    $$\begin{array}{c}
    R(z)=\left(1+\frac{5}{12} z\right)\left(1+\left(\frac{7}{12} z-\frac{1}{12} z^{2}\right)+\left(\frac{7}{12} z-\frac{1}{12} z^{2}\right)^{2}+O\left(z^{3}\right)\right)=1+z+\frac{z^{2}}{2}+O\left(z^{3}\right) \\
    e^{z}=1+z+\frac{z^{2}}{2}+O\left(z^{3}\right)
    \end{array}$$
    
    So $R(z)-e^{z}=O\left(z^{3}\right)$ as  $z \rightarrow 0$ 
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 20} Exercise 10.179}(buhui)
\begin{proof}
    
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 21} Exercise 10.182}
\begin{proof}
Modified Euler method:
\begin{center}
  \begin{tabular}{c|c}
$\begin{matrix}
  0 \\
  \frac{1}{2}
\end{matrix}$ &$ \begin{matrix}
                 0 &0 \\
                \frac{1}{2} & 0
               \end{matrix}$ \\
\hline
   & $\begin{matrix}
       0 & 1
     \end{matrix}$    \\
\end{tabular}
\end{center}
Improved Euler method:
\begin{center}
  \begin{tabular}{c|c}
$\begin{matrix}
  0 \\
  1
\end{matrix}$ &$ \begin{matrix}
                 0 &0 \\
                1 & 1
               \end{matrix}$ \\
\hline
   & $\begin{matrix}
       \frac{1}{2} & \frac{1}{2}
     \end{matrix}$    \\
\end{tabular}
\end{center}
Heun's third-order formula:
\begin{center}
  \begin{tabular}{c|c}
$\begin{matrix}
  0 \\
  \frac{1}{3}\\
  \frac{2}{3}
\end{matrix}$ &$ \begin{matrix}
                 0 &0 & 0\\
                \frac{1}{3} & 0 & 0\\
                0 & \frac{2}{3} & 0
               \end{matrix}$ \\
\hline
   & $\begin{matrix}
       \frac{1}{4}& 0 & \frac{3}{4}
     \end{matrix}$    \\
\end{tabular}
\end{center}

\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 22} Exercise 10.183}
\begin{proof}
  Note $\xi_i = U^n+k\sum_{j = 1}^{s}a_{ij}y_j$, then $y_i = f(\xi_i, t_n+c_ik)$, then by definition 10.180, 
  $$\left\{\begin{aligned}
  \xi_i &= U^n+k\sum_{j = 1}^{s}a_{ij}y_j\\
  U^{n+1} &= U^n +k\sum_{j=1}^{s}b_jy_j = U^n +k\sum_{j=1}^{s}b_jf(\xi_j, t_n+c_jk)
  \end{aligned}\right.$$
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 23} Exercise 10.190}
\begin{proof}
Similar to Example 10.188, we have 
$$\left\{\begin{aligned}
    y_1 &= f(U^n, t_n)\\
    y_2 &= f(U^{n}+ka_{21}y_1, t_n +c_2k)\\
    y_3 &= f(U^n+ka_{31}y_1+ka_{32}y_2,t_n+c_3k)\\
    U^{n+1} &= U^n+k(b_1y_1+b_2y_2+b_3y_3)
\end{aligned}\right.$$
Then we have $$\mathscr{L}u(t_n) = k(1-b_1-b_2-b_3)u'+k^2(\frac{1}{2}-b_2c_2-b_3c_3)u''+k^3(\frac{1}{6} - \frac{1}{2}b_2c_2^2-\frac{1}{2}b_3c_3^2)u'''+O(k^4)$$

Hence let 
$$b_1 +b_2 + b_3 = 1,\qquad b_2c_2+b_3c_3=\frac{1}{2},\qquad b_2c_2^2+b_3c_3^2=\frac{1}{3}$$ 

Then let $c_2 = c_3 = \frac{2}{3}$, we have the family
\begin{center}
    \begin{tabular}{c|ccc}
  $\begin{matrix}
    0 \\
    \frac{2}{3}\\
    \frac{2}{3}
  \end{matrix}$ &$ \begin{matrix}
                   0 & 0 &0 \\
                  \frac{2}{3} & 0 & 0\\
                  \frac{2}{3} - \frac{1}{4\alpha}& \frac{1}{4\alpha} & 0
                 \end{matrix}$ \\
  \hline
     & $\begin{matrix}
         \frac{1}{4} & \frac{3}{4} - \alpha & \alpha
       \end{matrix}$    \\
  \end{tabular}
  \end{center}

By Ex 10.182, clearly Heun's third-order formula doesn't belong to this family.
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 24} Exercise 10.196}
\begin{proof}
    $\Rightarrow:$ Since we have $$I_s(f) = I_s(a_nx^n) = ka_n\sum_{j=1}^{s}b_j(t_n+c_jk)^n = \int_{t_n}^{t_{n}+k}fdt = \frac{a_n}{n+1}[(t_n+k)^{n+1}-t_n^{n+1}]$$
    That is 
    $$\sum_{j = 1}^{s}b_j(t_n+c_jk)^n = \frac{\sum_{i = 0}^{n}(t_n+k)^it_n^{n-i}}{n+1}$$
    Hence $$\sum_{j = 1}^{s}b_jc_j^nk^n=\frac{k^n}{n+1}$$
    By the arbitrariness of $n$, it satisfies B(r).

    $\Leftarrow:$ Since RK is B(r), then we have to prove 
    $$\frac{C^m_nk^{n-m}}{(n-m+1)}k^{n-m} = \sum_{j=1}^{s}b_jc_j^{n-m}k^{n-m}C_n^m = \frac{\sum_{i=0}^{m}C_{n-i}^{m-i}k^{n-m}}{n+1}$$

    Since $C_n^m+C_n^{m-1} = C_{n+1}^m, C^{n+1}_m = \frac{n+1}{n-m+1}C_n^m$, we have 
    $$\frac{C^m_n}{n-m+1} = \frac{\sum_{i=0}^{m}C_{n-i}^{m-i}}{n+1}.$$
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 25} Exercise 10.212}(buhui)
\begin{proof}
    To show that an s-stage collocation method is at least s-order accurate, we need to show that the local truncation error (LTE) is of order $O(k^{s+1})$, where k is the step size.
     Then we have 
    $$LTE = u(t_n) - U^n - k \sum_{j=1}^s a_{ij}\, f(t_j, y_j)$$

where $a_{ij}$ are the coefficients of the collocation method and $f(t,y)$ is the right-hand side of the differential equation.

To analyze the LTE, we use the Taylor series expansion of $y(t_i)$ and $f(t_i, y_j)$ around $t_i$:

$y(t_i) = y_i + h\, y'(t_i) + \frac{h^2}{2} y''(t_i) + \cdots + \frac{h^s}{s!} y^{(s)}(t_i) + O(h^{s+1})$

$f(t_i, y_j) = f(t_i, y_i) + (y_j - y_i)\, f_y(t_i, y_i) + O(h)$

where $f_y$ denotes the derivative of f with respect to y.

Substituting these expansions into the LTE expression and simplifying, we get:

$$LTE_i = O(h^{s+1}) + O(h^{s+1}) + O(h^{s+2}) + \cdots + O(h^{s+1}) = O(h^{s+1})$$

The first two terms come from the Taylor expansion of $y(t_i)$ and $f(t_i, y_i)$, and the remaining terms are of higher order. Therefore, the LTE is of order $O(h^{s+1})$.

Since the collocation method is consistent (i.e., the LTE approaches zero as h approaches zero), it is also convergent of order s or higher. This means that the error between the exact solution and the numerical approximation decreases as $O(h^s)$ or faster.

Therefore, we have shown that an s-stage collocation method is at least s-order accurate.

\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 26} Exercise 10.213}
\begin{proof}
    Since the collocation method as defined in definition 209 is an s-stage IRK method, then it's an RK method. It only suffices to prove $c_i = \sum_{j=1}^{s}a_{ij},\sum_{i=1}^{s}b_i=1.$
    where $a_{ij}=\int_0^{c_i}l_j(\tau)d\tau, b_j=\int_0^1l_j(\tau)d\tau$

    Since by lemma 2.13, we have $\sum_{j = 1}^{s}l_j(\tau)\equiv 1$, then we have
    $$\begin{aligned}
        \sum_{j=1}^{s}a_{ij} &= \sum_{j = 1}^{s}\int_0^{c_i}l_j(\tau)d\tau=\int_0^{c_i}\sum_{j = 1}^{s}l_j(\tau)d\tau=c_i\\
        \sum_{i=1}^{s}b_i&=\sum_{i=1}^{s}\int_0^1l_i(\tau)d\tau=\int_0^1\sum_{i=1}^{s}l_i(\tau)d\tau = 1
    \end{aligned}$$
\end{proof}

\subsection*{\uppercase\expandafter{\romannumeral 27} Exercise 10.215}
\begin{proof}
    Let $s = 3, c_1 = \frac{1}{4}, c_2=\frac{1}{2}, c_3 = \frac{3}{4}$, then the corresponding elementary Lagrange interpolation polynomials are 
    $$l_1(\tau)= \frac{(\tau - \frac{1}{2})(\tau - \frac{3}{4})}{\frac{1}{8}},\qquad l_2(\tau)=\frac{(\tau - \frac{1}{4})(\tau - \frac{3}{4})}{\frac{1}{16}}\qquad l_3(\tau) 
    = \frac{(\tau - \frac{1}{4})(\tau - \frac{1}{2})}{\frac{1}{8}}$$
    and 10.150 yields the IRK method with Butcher tableau
    \begin{center}
        \begin{tabular}{c|ccc}
      $\begin{matrix}
        \frac{1}{4} \\
        \frac{1}{2}\\
        \frac{3}{4}
      \end{matrix}$ &$ \begin{matrix}
                       \frac{23}{48} & \frac{7}{12} & \frac{9}{16}\\
                      \frac{1}{3} & \frac{1}{6} & 0\\
                      \frac{5}{48}& \frac{1}{12}& \frac{3}{16}
                     \end{matrix}$ \\
      \hline
         & $\begin{matrix}
             \frac{2}{3} & \frac{1}{3} & \frac{2}{3}
           \end{matrix}$    \\
      \end{tabular}
    \end{center}

    Hence we have 
    $$\left\{\begin{aligned}
        &y_i = f\left(U^n+k \sum_{j = 1}^{s}a_{ij}y_j, t_n+ c_ik \right)\\
        &U^{n+1} = U^n +k\sum_{j = 1}^{s}b_jy_j
    \end{aligned}\right.$$
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 28} Exercise 10.218}
\begin{proof}
    Denote $u = (u_1, u_2, \cdots, u_s)'$, then by C(r)
    $$(Vu)_k = \sum_{j=1}^{s}c_j^{k-1}u_j = \sum_{j=1}^{s}c_j^{k-1}\sum_{i=1}^{s}b_ic_i^{m-1}a_{ij}=\sum_{i=1}^{s}b_ic_i^{m-1}\frac{c_i^k}{k} = \frac{1}{k}\sum_{i=1}^{s}b_ic_i^{m+k-1} = \frac{1}{k(m+k)}$$
    Denote $v = (v_1, v_2,\cdots, v_s)'$, then by B(r) we have
    $$(Vv)_k = \sum_{j=1}^{s}c_j^{k-1}v_j=\sum_{j=1}^{s}c_j^{k-1}\frac{1}{m}b_j(1-c_j^m)=\frac{1}{m}\left(\frac{1}{k}- \frac{1}{m+k}\right) = \frac{1}{k(m+k)}$$
    Then we have $Vu=Vv$, then $u = v$, which means D(r) by definition 216.
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 29} Exercise 10.222}
\begin{proof}
    By theorem 10.221 and lemma 10.220, we have $s = 3$. Then we suffices to compute r.

    Since $q_r(x) = (x-\frac{1}{4})(x-\frac{1}{2})(x -\frac{3}{4})$ and 
    $$\begin{aligned}
        &\int_0^1(x-\frac{1}{4})(x-\frac{1}{2})(x -\frac{3}{4})dx = 0\\
        &\int_0^1(x-\frac{1}{4})(x-\frac{1}{2})(x -\frac{3}{4})xdx = \frac{7}{960} \neq 0
    \end{aligned}$$
    Then $s+r = 3+1 = 4$. The collocation method has 4-order accuracy.
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 30} Exercise 10.241}(buhui)
\begin{proof}
    
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 31} Exercise 10.247}
\begin{proof}
    Since classical fourth-order RK method has the form of
    $$\begin{aligned}
    &y_1 = kf(U^n, t_n)\\
    &y_2 = kf(U^n + \frac{y_1}{2}, t_n + k/2)\\
    &y_3 = kf(U^n + \frac{y_2}{2}, t_n + k/2)\\
    &y_4 = kf(U^n + y_3, t_n + k)\\
    &U^{n+1} = U^n + \frac{y_1 + 2y_2 + 2y_3 + y_4}{6}
    \end{aligned}$$
    Then we have 
    $$\begin{aligned}
        R(z) &= 1+zb^T(I-zA)^{-1}\mathbf{1} \\
        &= 1+z\begin{pmatrix}
        \frac{1}{6} & \frac{1}{3} & \frac{1}{3} & \frac{1}{6}
    \end{pmatrix}\begin{pmatrix}
        1&0&0&0\\-\frac{z}{2}&1&0&0\\
        0&-\frac{z}{2}&1&0\\0&0&-z&1
    \end{pmatrix}^{-1}\begin{pmatrix}
        1&1&1&1
    \end{pmatrix}^T\\
    &=1+z+\frac{z^2}{2}+\frac{z^3}{6}+\frac{z^4}{24}
\end{aligned}$$
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 32} Exercise 10.252}
\begin{proof}
    Since we have 
    $$R_1(z) =1+z,\qquad R_2(z) = 1+z+\frac{z^2}{2},\qquad R_3(z) = 1+z+\frac{z^2}{2}+\frac{z^3}{6}.$$

    Hence $S_i:=\{z:|R_i(z)|\leq 1\}$.

    Firstly, we prove $S_1\subseteq S_2$. Note $\omega = z+1$, then we have $S_1 = \{|\omega|\leq 1\}$, and $S_2 = \{\omega:|\frac{\omega^2+1}{2}|\leq 1\}.$ Since $|\omega|\leq 1$ implies $|\frac{\omega^2+1}{2}|\leq \frac{|\omega|^2+1}{2}\leq 1$, clearly
    $\forall \omega \in S_1, \omega \in S_2$.
    
    Then we prove $S_2\subseteq S_3.$ We can figure it out by picture 10.251.

    It doesn't hold for all $s>3$.
    
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 33} Exercise 10.258}(buhui)
\begin{proof}
    Assume  $\operatorname{deg} \mathrm{Q}(\mathrm{z})=\mathrm{m}$ , and  $\operatorname{deg} \mathrm{P}(\mathrm{z})=\mathrm{n}$ .

$$\lim _{z \rightarrow \infty}|z|^{n-m} / 4=\frac{\lim _{z \rightarrow \infty}\left|z^{n}\right| / 2}{2 \lim _{z \rightarrow \infty}\left|z^{m}\right|} \leq \lim _{z \rightarrow \infty}|R(z)|=\lim _{z \rightarrow \infty}|P(z)| / \lim _{z \rightarrow \infty}|Q(z)| \leq \frac{2 \lim _{z \rightarrow \infty}\left|z^{n}\right|}{\lim _{z \rightarrow \infty}\left|z^{m}\right| / 2}=4 \lim _{z \rightarrow \infty}|z|^{n-m}$$

so $$ \lim _{z \rightarrow \infty}|R(z)|=0 \Longleftrightarrow \operatorname{deg} Q(z)>\operatorname{deg} P(z) $$
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 34} Exercise 10.261}
\begin{proof}
    It suffices to prove $\lim_{z\rightarrow \infty}|R(z)| = 0$. Similar to Theorem 10.260 we have
    $$\begin{aligned}
        \lim _{z \rightarrow \infty} R(z) & =\lim _{z \rightarrow \infty}\left(1+z \mathbf{b}^{T}(I-z A)^{-1} \mathbf{1}\right) \\
        & =1+\lim _{z \rightarrow \infty} \mathbf{b}^{T}\left(\frac{1}{z} I-A\right)^{-1} \mathbf{1} \\
        & =1-\mathbf{b}^{T} A^{-1} \mathbf{1} \\
        & =1-\mathbf{e}^{T} \mathbf{1}=0
        \end{aligned}$$
    where $\mathbf{e}$ is such that $A^T\mathbf{e} = \mathbf{b}$; since $a_{i1} = b_1$, we have $b_1\sum_{i=0}^{s}c_i = b_1$, then $\mathbf{e}^{T} \mathbf{1} = 1$.

    That is, it is L-stable;
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 35} Exercise 10.266}
\begin{proof}
    To prove the collocation method is L-stable, it is similar to the last Exercise.
We only need to apply the table the following formula:
    $$\lim _{z \rightarrow \infty} R(z) =1-\mathbf{e}^{T} \mathbf{1}$$
    Clearly it tends to 0.

    Then we prove it is 5th-order accurate.

    Similar to Exercise 222, then we can easily get $r = 2$, then order of accuracy is $3+2=5.$
\end{proof}
\subsection*{\uppercase\expandafter{\romannumeral 36} Exercise 10.275}
\begin{proof}
    The standard form is 
    $$\left\{\begin{aligned}
        &y_1 = f(U^n+\frac{ky_1}{2}, t_n+\frac{k}{2})\\
        &U^{n+1} = U^n + y_1
    \end{aligned}\right.$$
    Butcher tableau is
    \begin{center}
        \begin{tabular}{c|c}
      $\begin{matrix}
        \frac{1}{2}
      \end{matrix}$ &$ \begin{matrix}
                       \frac{1}{2}
                     \end{matrix}$ \\
      \hline
         & $\begin{matrix}
            1
           \end{matrix}$    \\
      \end{tabular}
    \end{center}
    Then we prove it is B-stable:

\end{proof}

\end{document} 