\documentclass{article}
\usepackage{geometry}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

% useful packages.
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{enumerate}
\usepackage{graphicx}
\usepackage{multicol}
\usepackage{fancyhdr}
\usepackage{layout}
%\usepackage{ctex}
\usepackage{listings}
\usepackage{subfigure}


% some common command
\newcommand{\dif}{\mathrm{d}}
\newcommand{\avg}[1]{\left\langle #1 \right\rangle}
\newcommand{\difFrac}[2]{\frac{\dif #1}{\dif #2}}
\newcommand{\pdfFrac}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\OFL}{\mathrm{OFL}}
\newcommand{\UFL}{\mathrm{UFL}}
\newcommand{\fl}{\mathrm{fl}}
\newcommand{\op}{\odot}
\newcommand{\Eabs}{E_{\mathrm{abs}}}
\newcommand{\Erel}{E_{\mathrm{rel}}}

\usepackage{xcolor}
\usepackage{fontspec} 
\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{comment}{rgb}{0.56,0.64,0.68}

\newfontfamily\monaco{Monaco}
\lstset {
aboveskip=3mm,
belowskip=3mm,
showstringspaces=false,       % underline spaces within strings
columns=flexible,
framerule=1pt,
rulecolor=\color{gray!35},
backgroundcolor=\color{gray!5},
basicstyle={\small\monaco},           % the size of the fonts that are used for the code
numbers=left,                   % where to put the line-numbers
numberstyle=\tiny\monaco\color{gray},  % the style that is used for the line-numbers
numbersep=5pt,                  % how far the line-numbers are from the code
commentstyle=\color{comment},
keywordstyle=\color{blue},
stringstyle=\color{dkgreen},
tabsize=2,                      % sets default tabsize to 2 spaces
captionpos=b,                   % sets the caption-position to bottom
breaklines=true,                % sets automatic line breaking
breakatwhitespace=false,        % sets if automatic breaks should only happen at whitespace
escapeinside={\%*}{*)},            % if add LaTeX within your code
morekeywords={*,...}               % if add more keywords to the set
}


\begin{document}
\title{Homework \#2}
\pagestyle{fancy}
\lhead{Name Li HuiTeng 3180102114}
\chead{ numPDE\#2}
\rhead{Date 21.04.08}


\tableofcontents

\newpage


\section{7.53 Complete the proof of Theorem 7.49.}
\begin{proof}[Proof]
If $v(t)$ , $u(t)$ satisfy 
\begin{align*}
	u(0) = u_0,\quad v(0)=u_0 + \varepsilon_0 ;\\ 
	u'(t)=f(u(t),t),\quad v'(t)=f(v(t),t) + \delta (t),\quad \forall t \in [0,T]. 
\end{align*}
with
\[
\forall t \in [0,T],\quad || \varepsilon_0|| < \epsilon, \quad||\delta (t)|| < \epsilon. 	
\]
We will prove that there exists a constant C so that 
\[
||u(t)-v(t)||<C\epsilon , \quad t \in [0,T].
\]
First, we have 
\[
||u'-v'|| = ||f(u,t)-f(v,t)-\delta (t)|| \leq L||u-v|| + \epsilon.
\]
Let $||u(t)-v(t)||=h(t)$. 
Since $ |h'| \leq ||(u-v)'||$,
\[
|h'| \leq Lh +\epsilon,	
\] 
\[
h' \leq Lh +\epsilon, \quad -h'	\leq Lh + \epsilon.
\]
Integrate from 0 to t,
\begin{align}
	h(t) &\leq L \int_{0}^{t}h(x)dx + t\epsilon + h(0) \notag\\ 
	&\leq  L \int_{0}^{t}h(x)dx + (T+1)\epsilon. \tag{7.53.1}
\end{align}

Multiply both sides by $Le^{-Lt}$, a non-negative term. And we have 
\[
L\difFrac{(\int_{0}^{t}h(x)dx\cdot e^{-Lt})}{t} \leq L e^{-Lt}(T+1)\epsilon.	
\]
Integrate from 0 to t, 
\[
L\int_{0}^{t}h(x)dx\cdot e^{-Lt} \leq (1-e^{-Lt})(T+1)\epsilon,
\]
\[
L\int_{0}^{t}h(x)dx \leq (e^{Lt}-1)(T+1)\epsilon \leq (e^{LT}-1)(T+1)\epsilon.		
\]
Substitute this into (7.53.1), we have
\[
h(t) \leq (T+1)e^{LT}\epsilon.
\]
Let $C=(T+1)e^{LT}$, then we have $||u(t)-v(t)|| \leq C\epsilon$, which completes the proof.
\end{proof}


\section{7.132 Prove Theorem 7.131.}
\begin{proof}[Proof]
The proof is by mathematical induction on $n$. 
When $n=s$, we have 
\begin{align*}
% \left[ \begin{array}{c}
% 	\widetilde y_{s-1} \\
% 	\widetilde y_{s-2} \\
% 	\widetilde y_{s-3} \\
% 	\vdots 	\\
% 	\widetilde y_{1} \\
% 	\widetilde y_{0}\\
% 		\end{array} 
% \right ]=	
% \left[ \begin{array}{cccccc}
% 	\theta_0  & \theta_1 & \theta_2 & \cdots & \theta_{s-2} & \theta_{s-1}\\
% 	0 		  & \theta_0 & \theta_1	&\cdots  &\theta_{s-3}  &\theta_{s-2} \\
% 	0 		  & 0 	     & \theta_0	&\cdots  &\theta_{s-4}  &\theta_{s-3} \\
% 	\vdots 	  & \vdots	 & \vdots   &\vdots	 &\vdots	    &\vdots		  \\
% 	0 		  & 0 	     & 0		&\cdots	 &\theta_0		&\theta_{1}	  \\
% 	0 		  & 0 	     & 0		&\cdots	 &	0		    &\theta_0	  \\
% 	\end{array} 
% 	\right ]^{-1}
% 	\left[ \begin{array}{c}
% 	y_{s-1} \\
% 	y_{s-2} \\
% 	y_{s-3} \\
% 	\vdots 	\\
% 	y_{1} \\
% 	y_{0}\\
% 			\end{array} 
% 	\right ]\\
	\left[ \begin{array}{cccccc}
		\theta_0  & \theta_1 & \theta_2 & \cdots & \theta_{s-2} & \theta_{s-1}\\
		0 		  & \theta_0 & \theta_1	&\cdots  &\theta_{s-3}  &\theta_{s-2} \\
		0 		  & 0 	     & \theta_0	&\cdots  &\theta_{s-4}  &\theta_{s-3} \\
		\vdots 	  & \vdots	 & \vdots   &\vdots	 &\vdots	    &\vdots		  \\
		0 		  & 0 	     & 0		&\cdots	 &\theta_0		&\theta_{1}	  \\
		0 		  & 0 	     & 0		&\cdots	 &	0		    &\theta_0	  \\
		\end{array} 
	\right ]
	\left[ \begin{array}{c}
		\widetilde y_{s-1} \\
		\widetilde y_{s-2} \\
		\widetilde y_{s-3} \\
		\vdots 	\\
		\widetilde y_{1} \\
		\widetilde y_{0}\\
			\end{array} 
	\right ]=
	\left[ \begin{array}{c}
		y_{s-1} \\
		y_{s-2} \\
		y_{s-3} \\
		\vdots 	\\
		y_{1} \\
		y_{0}\\
			\end{array} 
		\right ].
	\end{align*}
	Hence,
	\begin{align*}
	 \sum_{i=0}^{s-1}\alpha_i y_{i}
	&=
	\left[ \begin{array}{c}
	\alpha _{s-1} \\
	\alpha _{s-2} \\
	\alpha _{s-3} \\
		\vdots 	\\
	\alpha _{1} \\
	\alpha _{0}\\
		\end{array} 
	\right ]^T
	\left[ \begin{array}{cccccc}
		\theta_0  & \theta_1 & \theta_2 & \cdots & \theta_{s-2} & \theta_{s-1}\\
		\theta_{-1} & \theta_0 & \theta_1	&\cdots  &\theta_{s-3}  &\theta_{s-2} \\
		\theta_{-2} & \theta_{-1} & \theta_0	&\cdots  &\theta_{s-4}  &\theta_{s-3} \\
		\vdots 	  & \vdots	 & \vdots   &\vdots	 &\vdots	    &\vdots		  \\
		\theta_{2-s} & \theta_{3-s} & \theta_{4-s}&\cdots	 &\theta_0		&\theta_{1}	  \\
		\theta_{1-s} & \theta_{2-s} & \theta_{3-s}&\cdots	 &\theta_{-1	}&\theta_0	  \\
		\end{array} 
	\right ]
	\left[ \begin{array}{c}
		\widetilde y_{s-1} \\
		\widetilde y_{s-2} \\
		\widetilde y_{s-3} \\
		\vdots 	\\
		\widetilde y_{1} \\
		\widetilde y_{0}\\
			\end{array} 
	\right ]\\
	&=\left[ \begin{array}{c}
		\sum_{i=0}^{s-1}\alpha _{i}\theta_{i+1-s} \\
		\sum_{i=0}^{s-1}\alpha _{i}\theta_{i+2-s}\\
		\sum_{i=0}^{s-1}\alpha _{i}\theta_{i+3-s} \\
			\vdots 	\\
		\sum_{i=0}^{s-1}\alpha _{i}\theta_{i-1} \\
		\sum_{i=0}^{s-1}\alpha _{i}\theta_{i}\\
		\end{array} 
		\right ]^T
		\left[ \begin{array}{c}
			\widetilde y_{s-1} \\
			\widetilde y_{s-2} \\
			\widetilde y_{s-3} \\
			\vdots 	\\
			\widetilde y_{1} \\
			\widetilde y_{0}\\
			\end{array} 
		\right ]\\
	&=\left[ \begin{array}{c}
		-\theta_{1} \\
		-\theta_{2}\\
		-\theta_{3} \\
			\vdots 	\\
		-\theta_{s-1} \\
		-\theta_{s}\\
		\end{array} 
		\right ]^T
		\left[ \begin{array}{c}
			\widetilde y_{s-1} \\
			\widetilde y_{s-2} \\
			\widetilde y_{s-3} \\
			\vdots 	\\
			\widetilde y_{1} \\
			\widetilde y_{0}\\
			\end{array} 
		\right ]\\
	&=-\sum_{i=0}^{s-1}\theta_{s-i} \widetilde y_i.
\end{align*}
Substitute this equation into (7.82), we have
\[y_s=\sum_{i=0}^{s-1}\theta_{s-i} \widetilde y_i+ \theta_0\psi_s, \]
which proves Theorem 7.131 holds for $n=s$.

Assume that for any $n \leq q$ $(q \geq s)$, the theorem holds and consider the situation 
where $n=q+1$. We have 
\begin{align*}
\sum_{i=0}^{s-1}\alpha_i y_{i+q+1-s}
&=
\left[ \begin{array}{c}
	\alpha _{s-1} \\
	\alpha _{s-2} \\
	\alpha _{s-3} \\
		\vdots 	\\
	\alpha _{1} \\
	\alpha _{0}\\
		\end{array} 
	\right ]^T
	\left[ \begin{array}{cccccc}
			\theta_0  & \theta_1 & \theta_2 & \cdots & \theta_{q-1} & \theta_{q} \\
			0 		  & \theta_0 & \theta_1	&\cdots  &\theta_{q-2}  &\theta_{q-1} \\
			0 		  & 0 	     & \theta_0	&\cdots  &\theta_{q-3}  &\theta_{q-2} \\
			\vdots 	  & \vdots	 & \vdots   &\vdots	 &\vdots	    &\vdots		  \\
			0 		  & 0 	     & 0		&\cdots	 &\theta_{q-s+1}&\theta_{q-s+2}	  \\
			0 		  & 0 	     & 0		&\cdots	 &\theta_{q-s}	&\theta_{q-s+1}	  \\
		\end{array} \right ]_{s\times (q+1)}
	\left[ \begin{array}{c}
		\psi_q \\ 
		\psi_{q-1}\\
		\vdots \\
		\psi _s \\
		\widetilde y_{s-1} \\
		\widetilde y_{s-2} \\
		\widetilde y_{s-3} \\
		\vdots 	\\
		\widetilde y_{1} \\
		\widetilde y_{0}\\
			\end{array} 
	\right ]_{(q+1)\times 1}
\end{align*}
\begin{align*}
&=\left[ \begin{array}{c}
	-\theta_{1} \\
	-\theta_{2}\\
	-\theta_{3} \\
		\vdots 	\\
	-\theta_{q} \\
	-\theta_{q+1}\\
	\end{array} 
	\right ]^T
	\left[ \begin{array}{c}
		\psi_q \\ 
		\psi_{q-1}\\
		\vdots \\
		\psi _s \\
		\widetilde y_{s-1} \\
		\widetilde y_{s-2} \\
		\widetilde y_{s-3} \\
		\vdots 	\\
		\widetilde y_{1} \\
		\widetilde y_{0}\\
			\end{array} 
	\right ]_{(q+1)\times 1}
\end{align*}
Substitute this equation into (7.82), and we have
\[y_{q+1}=\sum_{i=0}^{s-1}\theta_{q+1-i} \widetilde y_i+ \sum_{i=s}^{q+1}\theta_{q+1-i}\psi_i.\]
Hence the theorem holds for $n=q+1$, and so the theorem is true for all $n$ by mathematical induction.
\end{proof}

\section{7.137 Prove a convergent LMM is consistent.}
\begin{proof}[Proof]

One attempt

A convergent LMM should be preconsistent and zero stable.
Preconsistency implies that 
\[\rho(1)=0.\]
Zero-stability implies that 
\[\rho'(1)\neq 0,\]
since z=1 is a simple root of $\rho(z)$.

Then consider the IVP
\[u'(t)=1,\quad u(0)=0,\]
with $T=1$. The exact solution is $u(t)=t$. 
For $N$ steps, $k=\frac{1}{N}$. Set the initial values as 
$U^i=\frac{ik\sigma(1) }{\rho'(1)}, \quad i=0,1,\cdots ,s-1$. They 
clearly satisfy (7.85), i.e. $\lim_{k\to 0} U^i=0, \forall i=0,1,\cdots ,s-1$.

Applying those initial values to the IVP yields the recurrence
 \[\sum_{i=0}^{s}\alpha_i U^{n+i}=k\sum_{i=0}^{s}\beta_i ,\quad n=0,1,\cdots,N-s.\]

In fact, the solutions of the above recurrence can be reduced to a general 
term formula, 
\[U^i=\frac{ik\sigma(1) }{\rho'(1) },\forall i=0,1,\cdots ,s-1,s,\cdots,N-1,N, \]
which, literally, is an expansion of initial values' general term formula.

We prove this fact by mathematical induction on i. The result is immediate when 
$i=0,1,\cdots,s-1$. Assume that for all i $\leq q$ $(q \geq s-1 )$, the formula holds. 
For i=q+1, we check whether $U^{q+1}=\frac{k(q+1)\sigma(1) }{\rho^{'}(1) }$ is 
a solution of recurrence:
\begin{align*}
	\sum_{j=0}^{s}\alpha_j U^{q-s+j+1}&=\sum_{j=0}^{s}\alpha_j \frac{k(q-s+j+1)\sigma(1) }{\rho'(1) }\\
	&=k\frac{\sigma(1)}{\rho'(1)}\cdot\Big(\sum_{j=0}^{s}j\alpha_j+(q-s+1)\cdot \sum_{j=0}^{s}\alpha_j \Big)\\
	[\text{Preconsistency: }\sum_{j=0}^{s}\alpha_j=0]\quad&=k\frac{\sigma(1)}{\rho'(1)}\cdot \rho'(1)\\
	&=k\sigma(1).
 \end{align*}
Since $\sigma(1)=\sum_{i=0}^{s}\beta_i$, we conclude $U^{q+1}=\frac{k(q+1)\sigma(1) }{\rho'(1) }$ 
is in agreement with the recurrence definition.
Also, the recurrence result is unique, so this shows the formula is true for i=q+1 and the formula is 
true for all $i=0,1,\cdots,N$ by mathematical induction.

By this formula, we have 
\[U^{N}=\frac{kN\sigma(1) }{\rho'(1) }=\frac{\sigma(1) }{\rho'(1)}.\]
Then we return to the IVP. Since the exact solution at $T=1$ is $u(1)=1$, convergence requires 
\[\lim_{k\to0}\frac{\sigma(1) }{\rho'(1)}=1,\]
and since the left-hand side does not depend on $k$, 
\[\frac{\sigma(1) }{\rho'(1)}=1,\quad \text{i.e.}\quad \sigma(1)=\rho'(1),\quad C_1=0.\]
So we prove the convergent LMM is consistent.
\end{proof}


% \begin{proof}
% Another attempt( fail, However)

% By Lemma 7.130, we have
% \[\sum_{j=0}^{s}\alpha_j=0\]
% Substitute this equation into (7.55),
% \begin{align*}
% 	\sum_{j=0}^{s}\alpha_j U^{N-s+j}&=k\sum_{j=0}^{s}\beta_j f_{N-s+j}\\
% 	\sum_{j=0}^{s}\alpha_j U^{N-s+j}-\sum_{j=0}^{s}\alpha_j U^{N-s}&=k\sum_{j=0}^{s}\beta_j f_{N-s+j}\\
% 	\sum_{j=1}^{s}j\alpha_j \frac{U^{N-s+j}-U^{N-s}}{jk}&=\sum_{j=0}^{s}\beta_j f_{N-s+j}\\
%  \end{align*}
%  When $k \to 0$, it yields
%  \[\sum_{j=0}^{s} (j\alpha_j-\beta_j)u^{'}(T)=0.\]
% Due to the arbitrariness of u(t), we conclude that $\sum_{j=0}^{s}(j\alpha_j-\beta_j)=0$.
% \end{proof}

\section{7.154 Write a program to reproduce the RAS plots in three former Examples.}
\begin{proof}[Proof]
	The RAS plots are reproduced by MATLAB R2019b.
	\lstset{language=Matlab}
	\begin{lstlisting}
%This is a file named rootlocus_AB.m .
	function y= rootlocus_AB(x,p)
		rho=(x-1.0).*(x.^(p-1));
		switch p
		case 1
			sigma=1;
		case 2
			sigma=1.5.*x-0.5;
		case 3
			sigma=23/12*x.^2-16/12*x+5/12;
		case 4
		sigma=55/24.*x.^3-59/24.*x.^2+37/24.*x-9/24;
		end
		y=1.0*rho./sigma;
	end

%This is a file named rootlocus_AM.m .
	function y= rootlocus_AM(x,p)
		rho=x.^(p-1)-x.^(p-2);
		switch p
		case 3
			sigma=5/12.*x.^2+8/12.*x-1/12;
		case 4
		sigma=9/24*x.^3+19/24.*x.^2-5/24.*x+1/24;
		case 5
		sigma=251/720*(x.^4)+646/720*(x.^3)-264/720*(x.^2)+106/720*x-19/720;  
		end
		y=rho./sigma;
	end

%This is a file named rootlocus_BD.m .
	function y= rootlocus_BD(x,p)
		sigma=x.^p;
		switch p
		case 1
			rho=x-1.0;
		case 2
			rho=x.^2-4/3*x+1/3;
			sigma=2/3*sigma;
		case 3
			rho=x.^3-18/11*x.^2+9/11*x-2/11;
			sigma=6/11*sigma;
		case 4
		rho=x.^4-48/25.*x.^3+36/25.*x.^2-16/25.*x+3/25;
		sigma=12/25*sigma;
		end
		y=1.0*rho./sigma;
	end
	
%This is a file named RASs.m, which generates the results.
	clear;
	clc
	w=0:0.01:2*pi;
	j=sqrt(-1);

	figure(1);
	for p=1:4
	H(p,:)=rootlocus_AB(exp(j*w),p);
	plot(H(p,:));
	hold on;
	end
	xlabel('Real Axis'),ylabel('Imaginary Axis'),title('rootlocus_{ABF}'),grid on
	saveas(1,'root-locus-ABF.png');

	figure(2);
	for p=3:5
	H(p-2,:)=rootlocus_AM(exp(j*w),p);
	plot(H(p-2,:));
	hold on;
	end
	xlabel('Real Axis'),ylabel('Imaginary Axis'),title('rootlocus_{AMF}'),grid on
	saveas(2,'root-locus-AMF.png');

	figure(3);
	for p=1:4
	H(p,:)=rootlocus_BD(exp(j*w),p);
	plot(H(p,:));
	hold on;
	end
	xlabel('Real Axis'),ylabel('Imaginary Axis'),title('rootlocus_{BDF}'),grid on
	saveas(3,'root-locus-BDF.png');
\end{lstlisting}
The output are shown below,
\begin{figure*}[!htbp]
	\begin{center}
		\includegraphics[width=0.5\textwidth]{root-locus-ABF.png}
	\end{center}
\end{figure*}
\begin{figure*}[!htbp]
	\begin{center}
		\includegraphics[width=0.5\textwidth]{root-locus-AMF.png}
	\end{center}
\end{figure*}
\begin{figure*}[!htbp]
	\begin{center}
		\includegraphics[width=0.5\textwidth]{root-locus-BDF.png}
	\end{center}
\end{figure*}
\end{proof}

\section{7.159 Judge the mathematical meaning of the segment length.}
\begin{proof}[Proof]
It's almost right, which means I agree that the length of the thick red
 line segment represents the absolute value of one-step error. 

Because when we calculate the one-step error $\mathcal{L} =u(t_{n+1})-U^{n+1}$, there's the assumption that $U^n$ is 
exact. So the curve $u(t)-u(t_n)+U^n$ is indeed $u(t)$ under the assumption, which implies the lower point of the 
thick red line segment is $u(t_{n+1})$. Also, since $l_3$ is parallel to $l_2$ with 
$y_2=f(U^n+\frac{k}{2}y_1,t_n+\frac{k}{2})$ and $l_3$ passes through $(U^n,t_n)$, the upper point of the segment is $U^n+ky_2$, which 
is indeed $U^{n+1}$.

Thus, the length of the segment represents the absolute value of the one-step error $u(t_{n+1})-U^{n+1}$.

\end{proof}

\section{7.169 Write down the Butcher tableaux of modified Euler, improved Euler, Heun's 3rd method.}
\begin{proof}[Proof]
	We have
	\begin{table}[!htbp]
		\centering  % 显示位置为中间
		\caption{Modified Euler Butcher tableau}  % 表格标题
		\label{table1}  % 用于索引表格的标签
		%字母的个数对应列数，|代表分割线
		% l代表左对齐，c代表居中，r代表右对齐
		\begin{tabular}{c|c c}  
			& &  \\[-6pt]  %可以避免文字偏上来调整文字与上边界的距离
			0&0&  \\  % 表格中的内容，用&分开，\\表示下一行
			& &  \\[-6pt] 
			$\frac{1}{2}$ &	$\frac{1}{2}$ &0\\
			& &  \\[-6pt] 
			\hline
			& &  \\[-6pt] 
			&0&1 \\
		\end{tabular}

		\centering  
		\caption{Improved Euler Butcher tableau} 
		\label{table2} 
		\begin{tabular}{c|c c}  
			& &  \\[-6pt] 
			0&0&  \\  
			& &  \\[-6pt] 
			1 &	 1&0\\
			& &  \\[-6pt] 
			\hline
			& &  \\[-6pt] 
			&$\frac{1}{2}$ & $\frac{1}{2}$ \\
		\end{tabular}

		\centering  
		\caption{Heun's 3rd Butcher tableau}  
		\label{table3} 
		\begin{tabular}{c|c c c}  
			& & & \\[-6pt]  
			0&0& & \\ 
			& & & \\[-6pt]  
			$\frac{1}{3}$&$\frac{1}{3}$ &0& \\ 
			& & & \\[-6pt]  
			$\frac{2}{3}$&0&$\frac{2}{3}$&0\\		
			& & & \\[-6pt]  
			\hline
			& & & \\[-6pt]  
			&$\frac{1}{4}$&0&$\frac{3}{4}$
		\end{tabular}
	\end{table}
	
\end{proof}

\section{7.171 Rewrite TR-BDF2 in standard form and derive Butcher tableaux and 
geometric interpretation.}
\begin{proof}[Proof]
We have
\begin{align*}
	\left\{
	\begin{array}{l}
		y_1=f(U^n,t_n),\\
		\\[-6pt]
	 	y_2=f(U^n+k(\frac{1}{4}y_1+\frac{1}{4}y_2),t_n+\frac{1}{2}k),\\
		 \\[-6pt]
		y_3=f(U^n+k(\frac{1}{3}y_1+\frac{1}{3}y_2+\frac{1}{3}y_3),t_n+k),\\
		\\[-6pt]
		U^{n+1}=U^n+k(\frac{1}{3}y_1+\frac{1}{3}y_2+\frac{1}{3}y_3).
	\end{array} 
	\right.
\end{align*}
\begin{table}[!htbp]
	\centering  
	\caption{TR-BDF2 Butcher tableau}  
	\label{table4} 
	\begin{tabular}{c|c c c}  
		& & & \\[-6pt]  
		0&0& & \\ 
		& & & \\[-6pt]  
		$\frac{1}{2}$&$\frac{1}{4}$ &$\frac{1}{4}$& \\ 
		& & & \\[-6pt]  
		$1$ & $\frac{1}{3}$ & $\frac{1}{3}$ & $\frac{1}{3}$ \\		
		& & & \\[-6pt]  
		\hline
		& & & \\[-6pt]  
		&$\frac{1}{3}$&$\frac{1}{3}$&$\frac{1}{3}$
	\end{tabular}
\end{table}
\begin{figure*}[!htbp]
	\begin{center}
		\includegraphics[width=0.9\textwidth]{geogebra-export.png}
	\end{center}
\end{figure*}
\end{proof}
\newpage

\section{7.176 Derive $O(k^3)$ term of modified Euler method to verify that it does not vanish.}
\begin{proof}[Proof]
We have
\begin{align*}
	u(t_{n+1})-u(t_n)=ku'(t_n+\frac{k}{2})+\frac{k^3}{24}u'''(t_n+\frac{k}{2})+O(k^5),\\
	u(t_n)+\frac{k}{2}u'(t_n)-u(t_n+\frac{k}{2})=-\frac{k^2}{8}u''(t_n)+O(k^3).
\end{align*}
Thus,
\begin{align*}
	\mathcal{L}u(t_n)&=u(t_{n+1})-u(t_n)-kf(u(t_n)+\frac{1}{2}ku^{'}(t_n),t_n+\frac{k}{2})\\
	&=ku'(t_n+\frac{k}{2})+\frac{k^3}{24}u'''(t_n+\frac{k}{2})+O(k^5)\\
	&-k[f(u(t_n+\frac{k}{2}),t_n+\frac{k}{2})
	+(u(t_n)+\frac{k}{2}u'(t_n)-u(t_n+\frac{k}{2}))f_1(u(t_n+\frac{k}{2}),t_n+\frac{k}{2})
	+O(k^4)]\\
	&=ku'(t_n+\frac{k}{2})+\frac{k^3}{24}u'''(t_n+\frac{k}{2})+O(k^5)\\
	&-k\Big(f(u(t_n+\frac{k}{2}),t_n+\frac{k}{2})
	+\big(-\frac{k^2}{8}u''(t_n)+O(k^3)\big)f_1(u(t_n+\frac{k}{2}),t_n+\frac{k}{2})
	+O(k^4)\Big)\\ 
	&=\frac{k^3}{24}u'''(t_n+\frac{k}{2})+\frac{k^3}{8}u''(t_n)\cdot f_1\big(u(t_n+\frac{k}{2}),t_n+\frac{k}{2}\big)+O(k^4)\\ 
	&=k^3\Big(\frac{1}{24}u'''(t_n)+\frac{1}{8}u''(t_n)f_1\big(u(t_n),t_n\big)\Big)+O(k^4),
\end{align*}
which shows $O(k^3)$ does not vanish.

\end{proof}

\section{7.184 Prove $\mathcal{L}(u(t_n))=O(k^5)$ for classical RK method.}
\begin{proof}[Proof]
Define $F_{n+i}=f_1(u(t_{n+i}),t_{n+i})$, $t_{n+i}=t_n+ik$. By Taylor expansion, we have
\begin{align*}
	y_1&=u'(t_n)\\ 
	\\[-6pt]
	y_2&=u'(t_{n+0.5})+F_{n+0.5}\cdot\big(u(t_n)+\frac{k}{2}u'(t_n)
	-u(t_{n+0.5})\big)+O(k^4)\\
	&=u'(t_{n+0.5})+F_{n+0.5}\cdot\big(-\frac{k^2}{8}u''(t_n)
	-\frac{k^3}{48}u'''(t_n)\big)+O(k^4)\\
	\\[-6pt]
	y_3&=u'(t_{n+0.5})+F_{n+0.5}\cdot\big(u(t_n)+\frac{k}{2}y_2
	-u(t_{n+0.5})\big)+O(k^4)\\
	&=u'(t_{n+0.5})+F_{n+0.5}\cdot\big(\frac{k^2}{8}u''(t_{n+0.5})
	-\frac{k^3}{48}u'''(t_{n+0.5})-F_{n+0.5}\cdot\frac{k^3}{16}u''(t_{n+0.5})\big)
	+O(k^4)\\
	\\[-6pt]
	y_4&=u'(t_{n+1})+F_{n+1}\cdot\big(u(t_n)+ky_3
	-u(t_{n+1})\big)+O(k^4)\\
	&=u'(t_{n+1})+F_{n+1}\cdot\big(-\frac{k^3}{24}u''(t_{n+0.5})
	+F_{n+0.5}\cdot\frac{k^3}{8}u'''(t_{n+0.5})\big)
	+O(k^4)
\end{align*}
Thus, 
\begin{align*}
	&\quad y_1+2y_2+2y_3+y_4\\
	&= u'(t_n)+4u'(t_{n+0.5})+u'(t_{n+1})\\
	&\quad+ 2F_{n+0.5}\cdot\big(-\frac{k^2}{8}u''(t_n)
	-\frac{k^3}{48}u'''(t_n)
	+\frac{k^2}{8}u''(t_{n+0.5})
	-\frac{k^3}{48}u'''(t_{n+0.5})-F_{n+0.5}\cdot\frac{k^3}{16}u''(t_{n+0.5})
	\big)\\
	&\quad+ F_{n+1}\cdot\big(-\frac{k^3}{24}u''(t_{n+0.5})
	+F_{n+0.5}\cdot\frac{k^3}{8}u'''(t_{n+0.5})\big)+O(k^4)\\
	&= u'(t_n)+4u'(t_{n+0.5})+u'(t_{n+1})
	+ 2F_{n+0.5}\cdot\big(\frac{k^2}{8}\frac{k}{2}u'''(t_n)
	-\frac{k^3}{24}u'''(t_n)
	-F_{n+0.5}\cdot\frac{k^3}{16}u''(t_{n+0.5})
	\big)\\
	&\quad+ F_{n+1}\cdot\big(-\frac{k^3}{24}u''(t_{n+0.5})
	+F_{n+0.5}\cdot\frac{k^3}{8}u'''(t_{n+0.5})\big)+O(k^4)\\ 
	&= u'(t_n)+4u'(t_{n+0.5})+u'(t_{n+1})+ F_{n+0.5}\cdot\big(\frac{k^3}{24}u'''(t_n)
	-F_{n+0.5}\cdot\frac{k^3}{8}u''(t_{n+0.5})
	\big)\\
	&\quad+ F_{n+1}\cdot\big(-\frac{k^3}{24}u''(t_{n+0.5})
	+F_{n+0.5}\cdot\frac{k^3}{8}u'''(t_{n+0.5})\big)+O(k^4).
\end{align*}
Since 
\[F_{n+1}=F_{n+0.5}+\big(u(t_{n+1})-u(t_{n+0.5})\big)\cdot f_{11}(u(t_{n+0.5}),t_{n+0.5})
+\frac{k}{2}f_{12}(u(t_{n+0.5}),t_{n+0.5})+O(k)=F_{n+0.5}+O(k),\]
the above equation can be reduced to 
\begin{align}
	&\quad y_1+2y_2+2y_3+y_4\notag\\
	&=u'(t_n)+4u'(t_{n+0.5})+u'(t_{n+1})\notag\\
	&\quad+F_{n+0.5}\cdot\big(\frac{k^3}{24}u'''(t_n)
	-F_{n+0.5}\cdot\frac{k^3}{8}u''(t_{n+0.5})\big)
	+\big(F_{n+0.5}+O(k)\big)\cdot\big(-\frac{k^3}{24}u'''(t_n)
	+F_{n+0.5}\cdot\frac{k^3}{8}u''(t_{n+0.5})\big)+O(k^4)\notag\\
	&=u'(t_n)+4u'(t_{n+0.5})+u'(t_{n+1})+O(k^4).\tag{7.182.1}
\end{align}
Also, since 
\begin{align}
	u(t_{n+1})-u(t_n)=ku'(t_{n+0.5})+\frac{k^3}{24}u'''(t_{n+0.5})+O(k^5),\tag{7.182.2}\\
	u'(t_n)+u'(t_{n+1})=2u'(t_{n+0.5})+\frac{k^2}{4}u'''(t_{n+0.5})+O(k^4),\tag{7.182.3}
\end{align}
combine it with (7.182.1) and we have 
\begin{align*}
	&\quad \mathcal{L}(u(t_n))\\ 
	&=u(t_{n+1})-u(t_n)-\frac{k}{6}(y_1+2y_2+2y_3+y_4)\\
	[by\quad(7.182.1)]&=u(t_{n+1})-u(t_n)-\frac{k}{6}\big(u'(t_n)+4u'(t_{n+0.5})+u'(t_{n+1})\big)+O(k^5)\\
	[by\quad(7.182.2),(7.182.3)]&=ku'(t_{n+0.5})+\frac{k^3}{24}u'''(t_{n+0.5})-\frac{k}{6}\big(6u'(t_{n+0.5})+\frac{k^2}{4}u'''(t_{n+0.5})\big)+O(k^5)\\
	&=O(k^5).
\end{align*}
\end{proof}


\section{7.189 Show R(z) of the TR-BDF2 method satisfies $R(z)-e^z=O(z^3)$ as $z \to 0$ .}
\begin{proof}[Proof]
\begin{align*}
U^*=U^n+\frac{z}{4}(U^n+U^*)\\ 
\Rightarrow U^*=\frac{4+z}{4-z}U^n.\\ 
\\[-6pt]
U^{n+1}=U^n+\frac{z}{3}(U^n+U^*+U^{n+1})\\ 
\Rightarrow U^{n+1}=\frac{5z+12}{(3-z)(4-z)}U^n.
\end{align*}
Hence, TR-BDF2 has 
\[R(z)=\frac{\frac{5}{12}z+1}{1-\frac{7}{12}z+\frac{1}{12}z^2}.\]
Also, by Taylor expansion at z=0,
\[e^z(z^2-7z+12)=12+5z+0z^2-\frac{1}{2}z^3+O(z^4),\]
and we have 
\[\frac{5z+12}{(3-z)(4-z)}-e^z=\frac{5z+12-e^z(z^2-7z+12)}{(3-z)(4-z)}=\frac{\frac{1}{2}z^3+O(z^4)}{(3-z)(4-z)}=\Theta (z^3).\]
\end{proof}

\section{7.205 Reproduce and explain the results about a stiff problem.}
\begin{proof}[Proof]
The results are reproduced by MATLAB R2019b.
\begin{lstlisting}
%This is a file named fstif.m .
function f = fstif(u,t)
f = -1e6 * (u - cos(t)) - sin(t);
end


%This is a file named sol_acu.m .
function u = sol_acu(t,a)
u = exp((-1e6)*t)*(a-1)+cos(t);
end


%This is a file named backwardeuler.m .
function [y,u]= backwardeuler(T,a,k)
acu = sol_acu(T,a);
t=0;
u=a;
temp1=a;
while (t<T)
t = t + k;
flag = 0;
diff = 1000;
while (flag<10)&&(abs(diff)>1e-10)
temp0 = temp1;
diff = (u + k*fstif(temp0,t) - temp0)/(1+k*1e6);
temp1 = temp0 + diff;
end
u = temp1;
end
y=abs(u-acu);
end


%This is a file named trapezoidal.m .
function [y,u]= trapezoidal(T,a,k)
acu = sol_acu(T,a);
t=0;
u=a;
temp1=a;
while (t<T)
t = t + k;
flag = 0;
diff = 1000;
while (flag<10)&&(abs(diff)>1e-10)
temp0 = temp1;
diff = (u + (k/2.0)*(fstif(temp0,t) + fstif(u,t-k))- temp0)/(1+(k*1e6/2.0));
temp1 = temp0 + diff;
flag = flag+1;
end
u = temp1;
end
y=abs(u-acu);
end


%This is a file named pro7_205.m .
clear;
T = 3.0;

eta = 1.0;
result(2,1)=backwardeuler(T,eta,0.2);
result(2,2)=trapezoidal(T,eta,0.2);
result(3,1)=backwardeuler(T,eta,0.1);
result(3,2)=trapezoidal(T,eta,0.1);
result(1,1)=backwardeuler(T,eta,0.05);
result(1,2)=trapezoidal(T,eta,0.05);

eta = 1.5;
result(5,1)=backwardeuler(T,eta,0.2);
result(5,2)=trapezoidal(T,eta,0.2);
result(6,1)=backwardeuler(T,eta,0.1);
result(6,2)=trapezoidal(T,eta,0.1);
result(4,1)=backwardeuler(T,eta,0.05);
result(4,2)=trapezoidal(T,eta,0.05);

fprintf("%8s %6s %20s %20s \n",'','k','Backward Euler','Trapezoidal');
fprintf("%8s %6s %20e %20e \n",'eta=1','0.2',result(2,1),result(2,2));
fprintf("%8s %6s %20e %20e \n",'','0.1',result(3,1),result(3,2));
fprintf("%8s %6s %20e %20e \n",'','0.05',result(1,1),result(1,2));
fprintf("%8s %6s %20e %20e \n",'eta=1.5','0.2',result(5,1),result(5,2));
fprintf("%8s %6s %20e %20e \n",'','0.1',result(6,1),result(6,2));
fprintf("%8s %6s %20e %20e \n",'','0.05',result(4,1),result(4,2));


%%This is the input and the output of Terminal.
>> pro7_205
              k       Backward Euler          Trapezoidal 
   eta=1    0.2         9.773074e-08         4.722892e-10 
            0.1         4.922330e-08         1.177194e-10 
           0.05         2.468586e-08         2.940836e-11 
 eta=1.5    0.2         9.773074e-08         4.998500e-01 
            0.1         4.922330e-08         4.994004e-01 
           0.05         2.468586e-08         4.976058e-01  
\end{lstlisting}

Why do the results show that a 1st-order method can be superior to a 2nd-order method? This can be attributed to 
the following reasons. First, only when $k$ is small enough to make $z=k \lambda \to 0$ can a method with high accuracy
 ensure a small solution error. As for a stiff problem with a huge $\lambda$ like this one, our $k$ is not small enough. Thus, 
 high accuracy will not ensure a small solution error. Also, since $\lambda$ is huge enough to make $z=k\lambda$ a huge number, 
we can consider the limit $\lim_{z \to \infty}|U^{n+1}/U^n|=\lim_{z \to \infty}|R(z)|$. The trapezoidal $|R(z)|$ tends to 1, 
which means the error produced in every step cannot be damped. The backward Euler $R(z)$, however, tends to 0 and the error can be 
more effectively damped in a single time step. Hence, the backward Euler method can be superior to the trapezoidal method when we deal with a stiff 
problem where $k$ is not small enough.

\end{proof}



\end{document}

%%% Local Variables: 
%%% mode: latex
%%% TeX-master: t
%%% End: 
