\documentclass[openany]{book}
\usepackage{geometry}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

% useful packages.
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{enumerate}
\usepackage{graphicx}
\usepackage{multicol}
\usepackage{fancyhdr}
\usepackage{layout}
% \usepackage{ctex}
\usepackage{listings}
\usepackage{subfigure}
\usepackage{setspace}

% some common command
\newcommand{\dif}{\mathrm{d}}
\newcommand{\avg}[1]{\left\langle #1 \right\rangle}
\newcommand{\difFrac}[2]{\frac{\dif #1}{\dif #2}}
\newcommand{\pdfFrac}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\OFL}{\mathrm{OFL}}
\newcommand{\UFL}{\mathrm{UFL}}
\newcommand{\fl}{\mathrm{fl}}
\newcommand{\op}{\odot}
\newcommand{\Eabs}{E_{\mathrm{abs}}}
\newcommand{\Erel}{E_{\mathrm{rel}}}

\usepackage{xcolor}
\usepackage{fontspec} 
\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{comment}{rgb}{0.56,0.64,0.68}

\newfontfamily\monaco{Monaco}
\lstset {
aboveskip=3mm,
belowskip=3mm,
showstringspaces=false,       % underline spaces within strings
columns=flexible,
framerule=1pt,
rulecolor=\color{gray!35},
backgroundcolor=\color{gray!5},
basicstyle={\small\monaco},           % the size of the fonts that are used for the code
numbers=left,                   % where to put the line-numbers
numberstyle=\tiny\monaco\color{gray},  % the style that is used for the line-numbers
numbersep=5pt,                  % how far the line-numbers are from the code
commentstyle=\color{comment},
keywordstyle=\color{blue},
stringstyle=\color{dkgreen},
tabsize=2,                      % sets default tabsize to 2 spaces
captionpos=b,                   % sets the caption-position to bottom
breaklines=true,                % sets automatic line breaking
breakatwhitespace=false,        % sets if automatic breaks should only happen at whitespace
escapeinside={\%*}{*)},            % if add LaTeX within your code
morekeywords={*,...}               % if add more keywords to the set
}


\begin{document}
\title{Homework \#1}
\pagestyle{fancy}
\lhead{Name Li HuiTeng 3180102114}
\chead{ NumAnalysis\#1}
\rhead{Date 21.10.01}

\begin{spacing}{2.5}
\tableofcontents	
\end{spacing}

\chapter{Theoretical Questions}
\section{Width in bisection.}
\begin{proof}
	Since the initial width is 2, the width of the $n$-th interval is $\frac{1}{2^{n-1}}$.
	Also, in Algorithm 1.9 we always take the left part as ``the interval''. Hence the maximum possible distance is $\frac{3}{2^n}$, a sum of half a left part and a whole right part.
\end{proof}


\section{Necessary steps of bisection $(a_0>0)$.}
\begin{proof}
	Since $b_0>a_0>0$, we have $\alpha>0$ and 
\begin{align*}
	\frac{|\alpha-c_n|}{\alpha}\leq \frac{|\alpha-c_n|}{a_0} \leq \frac{2^{-(n+1)}(b_0-a_0)}{a_0},
\end{align*}
where the second step follows from Theorem 1.13. Let $n \in \mathbb{N}^+$ denote the number of steps such that the relative error is no greater than $\epsilon$. 
Hence we have
\begin{align*}
\frac{2^{-(n+1)}(b_0-a_0)}{a_0} & \leq \epsilon ,\\
2^{n+1} & \geq \frac{b_0-a_0}{a_0\epsilon},\\
n & \geq\frac{\log(b_0-a_0)-\log\epsilon-\log a_0}{\log 2}-1,
\end{align*}
which completes the proof.
\end{proof}

\section{Single precision in bisection.}
\begin{proof}
	The answer is No. This is because single precision only permits 23 bits of mantissa. To achieve an absolute accuracy no greater 
	than $10^{-6}$, the fractional part of our guesstimate demands at least 18 valid bits, since $2^{-18}=8^{-6}>10^{-6}$. However, 
	the integer part also demands 8 bits since $128=(10000000)_2$. As a consequence, the number of valid bits of mantissa should be no less than $18+8-1=25$, which 
	apparently fails a single precision machine. (The $-1$ is because the leading `1' of the mantissa is left implicit.)
\end{proof}

\section{Four iterations of Newton's method.}
\begin{proof}
	Since $p(x)=4x^3-2x^2+3$, $p'(x)=12x^2-4x$, and $x_{n+1}=x_n-\frac{p(x_n)}{p'(x_n)}$, we have
	\begin{align*}
		x_0&=-1\\
		x_1&=-0.812500000000000\\
		x_2&=  -0.770804195804196\\
		x_3&=  -0.768832384255760\\
		x_4&=  -0.768828085849211
	\end{align*}
\end{proof}

\section{Variation of Newton's method.}
\begin{proof}
	Since $x_{n+1}=x_n-\frac{f(x_n)}{f'(x_0)}$, we have 
	\begin{align*}
		e_{n+1}=x_{n+1}-\alpha&=x_n-\alpha-\frac{f(x_n)}{f'(x_0)}\\
		&=e_n-\frac{f(\alpha)+e_nf'(\xi_n)}{f'(x_0)}\\
		&=e_n(1-\frac{f'(\xi_n)}{f'(x_0)}),
	\end{align*}
	where $\xi_n$ is between $\alpha$ and $x_n$.
\end{proof}

\section{Convergence of $x_{n+1}=\tan^{-1}x_n$.}
\begin{proof}
	It will converge for all $x_0 \in I=(-\frac{\pi}{2},\frac{\pi}{2})$.

	Suppose $x_0=0$; then we have $x_n=0$, $\forall n \in \mathbb{N}^+$.

	Suppose $x_0>0$. Since $f(x)=\tan^{-1}x$ is monotonically increasing on $I$, we have $x_1=\tan^{-1}x_0\geq \tan^{-1}(0)=0$. 
	Then by induction, $x_n=\tan^{-1}x_{n-1}\geq \tan^{-1}0=0$, $\forall n \in \mathbb{N}^+$. Also, $x_{n+1}=\tan^{-1}x_n$ implies $\{x_n\}$ is a monotonically decreasing sequence 
	since $\forall x \in [0,\frac{\pi}{2})$, $x \geq \tan^{-1}x$. Then by the monotone sequence 
	theorem, $\{x_n\}$ converges when $x_0>0$.

	The case $x_0<0$ is entirely analogous to the case $x_0>0$.
\end{proof}


\section{Prove the convergence of $x=\frac{\displaystyle 1}{p+{\displaystyle\frac1{p+{\displaystyle\cdots}}}}$.}
\begin{proof}
	$x_1=\frac{1}{p}$, $x_{n+1}=\frac{1}{p+x_n},\forall n \in \mathbb{N}^+$.\\
	Then define $p(x)=\frac{1}{p+x}$, $x \in [0,1]$. First, $p(x)$ is a contraction on $[0,1]$:
	\begin{align*}
	\forall x_1,x_2 \in [0,1], \text{ } |\frac{1}{p+x_1}-\frac{1}{p+x_2}|=|x_2-x_1||\frac{1}{(p+x_1)(p+x_2)}|\leq \frac{1}{p^2}|x_2-x_1|<|x_2-x_1| \text{ (p>1)},\\
	p([0,1])=[\frac{1}{p+1},\frac{1}{p}] \subseteq [0,1].	
	\end{align*}
	Since $x_1 \in [0,1]$, by Convergence of Contractions Theorem we have $\{x_n\}$ converges to $\alpha$, where 
	$\alpha$ is the fixed point of $ p(x)=x $ when $x\in [0,1]$. Then solving $x=\frac{1}{p+x}(x\in[0,1])$ yields 
	$x=\lim_{n \to \infty}x_n=\alpha=\frac{\sqrt{p^2+4}-p}{2}$.

\end{proof}

\section{Necessary steps of bisection $(a_0<0)$.}
\begin{proof}
	The relative error is not a good choice in this case. Just consider $\alpha=0$. For any two algorithms, their relative 
	errors will both be too big to compare, making it meaningless to judge which algorithm is the better one. Thus the absolute 
	error should be taken into account. Denote the expected absolute error as $E_A$. Since
	\begin{align*}
		|\alpha-c_n| &\leq 2^{-(n+1)}(b_0-a_0),
	\end{align*}
	and similarly we have
	\begin{align*}
	2^{-(n+1)}(b_0-a_0) & \leq E_A ,\\
	2^{n+1} & \geq \frac{b_0-a_0}{E_A},\\
	n & \geq \frac{\log(b_0-a_0)-\log E_A}{\log 2}-1.
	\end{align*}

\end{proof}

\section{Multiple zeros in Newton's method.}
\begin{proof}
	1. By L'H\^{o}pital's rule, we have

	$\alpha$ is a $k$-multiple zero of $f$ $\Leftrightarrow$\\
	$\exists k >1$, s.t. $\forall \{x_n\} \to \alpha$, $\lim_{n\to \infty}\frac{|f(x_n)-f(\alpha)|}{|(x_n-\alpha)^k|}=c>0.$

	Then for $\alpha$, a zero of $f$, we can apply a sequence $\{x_n\}$ converging to it to check whether $\{\frac{|f(x_n)-f(\alpha)|}{|(x_n-\alpha)^k|}\}$ 
	is bounded for a given integer $k$ ($k>1$).

	2. (Denote $e_n=x_n-r$.)

	Since $r$ is a zero of multiplicity k of $f$, applying Taylor expansion to $f(x_n),f'(x_n)$ at $x=r$ separately yields 
	\begin{align}
		f(x_n)&=\sum_{i=0}^{k}(x_n-r)^i\frac{f^{(i)}(r)}{i!}+(x_n-r)^{k+1}\frac{f^{(k+1)}(\xi_n)}{(k+1)!}=e_n^k\frac{f^{(k)}(r)}{k!}+e_n^{k+1}\frac{f^{(k+1)}(\xi_n)}{(k+1)!},\\
		f'(x_n)&=\sum_{i=0}^{k-1}(x_n-r)^i\frac{f^{(i+1)}(r)}{i!}+(x_n-r)^{k}\frac{f^{(k+1)}(\eta_n)}{k!}=e_n^{k-1}\frac{f^{(k)}(r)}{(k-1)!}+e_n^{k}\frac{f^{(k+1)}(\eta_n)}{k!},\\
		f'(x_n)&=\sum_{i=0}^{k-2}(x_n-r)^i\frac{f^{(i+1)}(r)}{i!}+(x_n-r)^{k-1}\frac{f^{(k)}(\zeta_n)}{(k-1)!}=e_n^{k-1}\frac{f^{(k)}(\zeta_n)}{(k-1)!},
	\end{align}
	where $\xi_n,\eta_n,\zeta_n$ are between $x_n$ and $r$.
	Then for the modified Newton's method, we have 
	\begin{align*}
		(*):\quad e_{n+1}&=e_n-k\frac{f(x_n)}{f'(x_n)}\\
			&=e_n-k\frac{\displaystyle e_n^k\frac{f^{(k)}(r)}{k!}+e_{n}^{k+1}\frac{\displaystyle f^{(k+1)}(\xi_n)}{\displaystyle(k+1)!}}{e_n^{k-1}\frac{\displaystyle f^{(k)}(r)}{\displaystyle(k-1)!}+e_{n}^{k}\frac{\displaystyle f^{(k+1)}(\eta _n)}{\displaystyle k!}}\\
			&=e_n^2 \left[\frac{{\displaystyle\frac{f^{(k+1)}(\eta_n)}k}-{\displaystyle\frac{f^{(k+1)}(\xi_n)}{k+1}}}{f^{(k)}(r)+e_n{\displaystyle\frac{f^{(k+1)}(\eta_n)}k}}\right]\\
			&=e_n^2 \left[\frac{{\displaystyle\frac{f^{(k+1)}(\eta_n)}k}-{\displaystyle\frac{f^{(k+1)}(\xi_n)}{k+1}}}{f^{(k)}(\zeta_n)}\right],
		\end{align*}
	where the second step follows from Taylor expansion (1) and (2), the fourth step from a combination of Taylor expansion (2) and (3). 
	
	Then the assumption $f^{(k)}(r)\neq0$ and the continuity of $f^{(k)}$ yield 
	\begin{align*}
		\exists \delta_1, \text{ s.t. }  \forall x \in B_1,f^{(k)}(x)\neq 0
	\end{align*}
	where $B_1=[r-\delta_1,r+\delta_1]$. Define 
	\begin{align*}
		M=\frac{\displaystyle(2k+1)\max_{x \in B_1}|f^{(k+1)}(x)|}{\displaystyle k(k+1)\min_{x \in B_1}|f^{(k)}(x)|},
	\end{align*}
	and pick $x_0$ sufficiently close to $r$ such that $\delta_0=|x_0-r|<\delta_1$ and $M\delta_0<1$. 
	
	The definition of $M$ and $(*)$ imply 
	\begin{align*}
		|x_{n+1}-r|\leq M |x_n-r|^2.
	\end{align*}
	The rest is the same as the proof of Theorem 1.15. Then $\{x_n\}$ converges quadratically to $r$.
\end{proof}
\chapter{Programming Assignments}
Use \texttt{make all} to generate and store all answers into \texttt{Assignment-x.txt} documents! ($x=1,2,\cdots,5$)\\ 
Codes are contained in the e-mail tar, and only conclusions will be shown in the following part.
\section{Assignment B}
The results are shown below, generated from \texttt{make B}.
\lstset{language=Matlab}
\begin{lstlisting}
	B.1
	By Bisection Method, the solution is 0.860334, in 26 steps, with tolerance of 1e-08.
	B.2
	By Bisection Method, the solution is 0.641186, in 25 steps, with tolerance of 1e-08.
	B.3
	By Bisection Method, the solution is 1.82938, in 27 steps, with tolerance of 1e-08.
	B.4
	Warning: The given tolerance 1e-08 cannot be reached, because the width of interval is less than 1e-10 in 35 steps.
	The ouput is 0.117877, with f(x0) = 4.1527e+09.
\end{lstlisting}

\section{Assignment C}
The results are shown below, generated from \texttt{make C}.
\lstset{language=Matlab}
\begin{lstlisting}
	C.
	By Newton's Method, the solution is 4.49341, in 3 steps, with tolerance of 1e-08.
	By Newton's Method, the solution is 7.72525, in 4 steps, with tolerance of 1e-08.
\end{lstlisting}

\section{Assignment D}
The results are shown below, generated from \texttt{make D}.
\lstset{language=Matlab}
\begin{lstlisting}
	D.1
	Take initial values as 0, 1.5708:
	By Secant Method, the solution is 3.14118, in 17 steps, with tolerance of 1e-08.
	Take initial values as -7.85398, -6.28319:
	By Secant Method, the solution is -9.42437, in 17 steps, with tolerance of 1e-08.
	D.2
	Take initial values as 1, 1.4:
	By Secant Method, the solution is 1.30633, in 13 steps, with tolerance of 1e-08.
	Take initial values as -3.5, -1:
	By Secant Method, the solution is -3.09641, in 5 steps, with tolerance of 1e-08.
	D.3
	Take initial values as 0, -1.5:
	By Secant Method, the solution is -0.188685, in 6 steps, with tolerance of 1e-08.
	Take initial values as 10, 20:
	By Secant Method, the solution is 11.7371, in 6 steps, with tolerance of 1e-08.
\end{lstlisting}
The reason the results differ for different initial values is straightforward: when $f$ does not have a unique zero, the iteration sequence 
tends to converge to a zero that is closer to its initial values. 

\section{Assignment E}
The results are shown below, generated from \texttt{make E}.
\lstset{language=Matlab}
In this case, we should realize that the tolerance is not the absolute error, i.e.\ the residual is not the solution error. 

To ensure the absolute error is within $10^{-2}$, 
we can use the class function \texttt{set-min-width} to make the width between $x_n$ and $x_{n-1}$ less than $10^{-2}$. 

Of course, when the tolerance is set sufficiently below $10^{-2}$, the solution also meets the expectation.
\begin{lstlisting}
	E.
	By Bisection Method, the solution is 0.164062, in 8 steps, with tolerance of 0.005.
	The depth is 0.835938 .
	By Newton's Method, the solution is 0.166166, in 2 steps, with tolerance of 0.005.
	The depth is 0.833834 .
	By Secant Method, the solution is 0.166638, in 2 steps, with tolerance of 0.005.
	The depth is 0.833836 .
\end{lstlisting}

\section{Assignment F}
The results are shown below, generated from \texttt{make F}.
\lstset{language=Matlab}
\begin{lstlisting}
	F.(a)
	By Newton's Method, the solution is 0.575473, in 2 steps, with tolerance of 1e-06.
	The angle is 32.9722 degree.
	F.(b)
	By Newton's Method, the solution is 0.578907, in 2 steps, with tolerance of 1e-06.
	The angle is 33.1689 degree.
	F.(c)
	First, a guess close to 33 degree, for example take another as 120 degree:
	By Secant Method, the solution is 0.578907, in 3 steps, with tolerance of 1e-06.
	The angle is 33.1689 degree.
	Next, a guess far from 33 degree, for example take another as 348.4 degree:
	By Secant Method, the solution is 6.08248, in 3 steps, with tolerance of 1e-06.
	The angle is 348.5 degree.
\end{lstlisting}
For (c), as we can see, $-\beta_1+2k\pi$, $k\in \mathbb{Z}$, is always a solution of the equation. 
Hence if we choose another initial value sufficiently near $-\beta_1+360=348.5$ degrees, the sequence will 
converge to $348.5$ degrees, i.e.\ $6.08248$ rad.    
\end{document}

%%% Local Variables: 
%%% mode: latex
%%% TeX-master: t
%%% End: 
