\documentclass[a4paper]{article}
\usepackage[affil-it]{authblk}
\usepackage{indentfirst}
\usepackage{amsmath}
\usepackage{geometry}
\usepackage{xeCJK}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

\begin{document}
% =================================================
\title{Numerical Analysis homework \# 1}

\author{周川迪 Zhou Chuandi 3220101409}
\affil{强基数学2201}

\date{\today}

\maketitle

\begin{abstract}
  1.8.1 Theoretical questions

  Please complete question 1.8.1 I - VIII (VIII is optional),  
  Theoretical Questions, on page 7 of the lecture notes 
  in English. 

\end{abstract}
\section*{I. Bisection Method on $[1.5,3.5]$}
\subsection*{I-a}
Let the interval at the $0$th step be $[1.5,3.5]$, its width is 2.

Thus, the width of the interval at the $n$th step is $\frac{2}{2^n}=2^{1-n}$.

\subsection*{I-b}
The supremum is half the width of the interval, which is $\frac{1}{2^n}=2^{-n}$.

(For the initial interval, it is $1$.)

\section*{II. Proof that $n \geq \frac{\log(b_0 - a_0) - \log \epsilon - \log a_0}{\log 2} - 1$, where $\epsilon$ is the relative error.}
\subsection*{II-a}
At the $n$th step, the width of the interval is $\frac{b_0-a_0}{2^n}$.

Let the root be $\alpha$ and the absolute error be $\delta$,

Since $\alpha \geq a_0$, the relative error satisfies
\[\epsilon=\frac{\delta}{\alpha} \leq \frac{\delta}{a_0}.\]

The absolute error is no more than half the width of the interval, so
\[\epsilon \leq \frac{\delta}{a_0} \leq \frac{b_0-a_0}{2^{n+1} a_0}.\]
Hence, to guarantee a relative error of at most $\epsilon$, it suffices that
\[\frac{b_0-a_0}{2^{n+1} a_0} \leq \epsilon\]
\[\Rightarrow n+1 \geq \log_{2}\left(\frac{b_0-a_0}{\epsilon a_0}\right)\]
\[\Rightarrow n \geq \frac{\log(b_0-a_0)-\log \epsilon-\log a_0}{\log 2}-1.\]

\section*{III. Perform four iterations for $p(x) = 4x^3 - 2x^2 + 3 = 0$ starting at $x_0 = -1$}
\subsection*{III-a}
We have $p'(x)=12x^2-4x$

and the iterations:
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
  \hline
  $n$ & $x_n$ & $p(x_n)$ & $p'(x_n)$ & $x_n - \frac{p(x_n)}{p'(x_n)}$ \\ \hline
  0 & -1.0000 & -3.0000 & 16.0000 & -0.8125 \\ \hline
  1 & -0.8125 & -0.4658 & 11.1719 & -0.7708 \\ \hline
  2 & -0.7708 & -0.0201 & 10.2129 & -0.7688 \\ \hline
  3 & -0.7688 & $-3.9801\times 10^{-4}$ & 10.1686 & -0.7688 \\ \hline
  4 & -0.7688 &  &  &  \\ \hline
\end{tabular}
\end{center}


\section*{IV. Find $C$ and $s$ such that $e_{n+1} = C e_n^s$ in $x_{n+1} = x_n - \frac{f(x_n)}{f'(x_0)}$}
\subsection*{IV-a}
Let $\alpha$ be the root, $f(\alpha)=0$

By \textbf{Lagrange's Mean Value Theorem}, there exists $\xi_n$ between $x_n$ and $\alpha$ s.t.
\[f(x_n)=f(\alpha)+f'(\xi_n)(x_n-\alpha)=f'(\xi_n)(x_n-\alpha)\]
\[\Rightarrow x_{n+1}=x_n-\frac{f(x_n)}{f'(x_0)}=x_n-\frac{f'(\xi_n)(x_n-\alpha)}{f'(x_0)}\]
\[\Rightarrow x_{n+1}-\alpha=(x_n-\alpha)(1-\frac{f'(\xi_n)}{f'(x_0)})\]

Since $e_n=|x_n-\alpha|$,
\[\Rightarrow e_{n+1}=e_n |1-\frac{f'(\xi_n)}{f'(x_0)}|\]

Thus we get $s=1$ and $C=|1-\frac{f'(\xi_n)}{f'(x_0)}|$

(When $\{x_n\}$ converges, we have $\xi_n \rightarrow \alpha$ as $n \rightarrow \infty$, so $C \rightarrow |1-\frac{f'(\alpha)}{f'(x_0)}|$.)

\section*{V. Convergence of $x_{n+1} = \tan^{-1}(x_n)$ within $(-\frac{\pi}{2}, \frac{\pi}{2})$}
\subsection*{V-a}
When $x \in (0,\frac{\pi}{2})$ we have $\tan x>x$, and thus $0<\tan^{-1}(x)<x$ for all $x\in (0,\frac{\pi}{2})$.

$\bullet$ if $x_1=0$ then $x_n \equiv 0$.

$\bullet$ if $x_1>0$ then $x_n>0$ for all $n$ by induction.

So we have \[0<x_{n+1}=\tan^{-1}(x_n)<x_n\]

Since $\{x_n\}$ is decreasing and bounded below by $0$, by the \textbf{Monotone Convergence Theorem}, $\{x_n\}$ converges.

$\bullet$ Similarly, if $x_1<0$ we also have \[x_n<\tan^{-1}(x_n)=x_{n+1}<0\]

and $x_n$ also converges.

Thus the iteration converges within $(-\frac{\pi}{2},\frac{\pi}{2})$.

\section*{VI. Prove that $x = \frac{1}{p + \frac{1}{p + \cdots}}, p>1$ converges}
\subsection*{VI-a}
We have $x_1=\frac{1}{p}$, $x_{n+1}=f(x_n)$, where $f(x)=\frac{1}{p+x}$.

Clear that $x_n>0$ for all $n$.

For all $a,b>0$, \[|f(a)-f(b)|=\frac{|b-a|}{(p+a)(p+b)}<\frac{|b-a|}{p^2}\]

so $f$ is a contraction on $(0,\infty)$ with contraction constant $\frac{1}{p^2}<1$ (since $p>1$). By \textbf{Theorem 1.39}, $\{x_n\}$ converges to a unique fixed point $\alpha \in (0,\infty)$,

where $\alpha=f(\alpha) \Rightarrow \alpha=\frac{1}{p+\alpha} \Rightarrow \alpha=\frac{\sqrt{p^2+4}-p}{2}$. (discard the negative root)

\section*{VII. Derive an inequality as in II when $a_0 < 0 < b_0$, and discuss the relative error}
\subsection*{VII-a}
At the $n$th step, the width of the interval is $\frac{b_0-a_0}{2^n}$.

Let the root be $\alpha$ and the absolute error be $\delta$,

This time we can only bound the relative error by
\[\epsilon \leq \frac{b_0-a_0}{2^{n+1} |\alpha|}, \]
so guaranteeing a relative error of at most $\epsilon$ requires
\[n \geq \frac{\log(b_0-a_0)-\log \epsilon-\log |\alpha|}{\log 2}-1.\]

This bound depends on the unknown root $\alpha$, and therefore it is not appropriate, because $|\alpha|$ can be arbitrarily close to $0$, making the required $n$ arbitrarily large.

\section*{VIII. (*) Detect multiple zeros by behavior of $(x_n, f(x_n))$, Prove quadratic convergence is restored with the modification: $x_{n+1} = x_n - k \frac{f(x_n)}{f'(x_n)}.$}
\subsection*{VIII-a}
When $x_n$ is close to $\alpha$, $f'(\alpha)$ can be approximated by difference quotient $\frac{f(x_{n+1})-f(x_n)}{x_{n+1}-x_n}$.

So when $\frac{f(x_{n+1})-f(x_n)}{x_{n+1}-x_n}$ is close to zero, 

we can detect a multiple zero close to $x_n$ by the behavior of the points $(x_n, f(x_n))$

Similarly we can calculate higher-order difference quotients to detect the multiplicity.

\subsection*{VIII-b}
When $\alpha$ is a zero of multiplicity $k$ of $f$, by \textbf{Taylor's Formula}, 
\[f(x)=(x-\alpha)^k \frac{f^{(k)}(x)}{k!}+O((x-\alpha)^{k+1})\]
\[\Rightarrow f'(x)=(x-\alpha)^{k-1} \frac{f^{(k)}(x)}{(k-1)!}+O((x-\alpha)^k) \]

Use the modified iteration,

\begin{equation*}
  \begin{aligned}
    x_{n+1} &= x_n-k \frac{(x_n-\alpha)^k \frac{f^{(k)}(x_n)}{k!}+O((x_n-\alpha)^{k+1})}{(x_n-\alpha)^{k-1} \frac{f^{(k)}(x_n)}{(k-1)!}+O((x_n-\alpha)^k)} \\
            &= x_n-\frac{(x_n-\alpha) f^{(k)}(x_n)+O((x_n-\alpha)^2)}{f^{(k)}(x_n)+O(x_n-\alpha)} \\
  \end{aligned}
\end{equation*}

Then,
\begin{equation*}
  \begin{aligned}
    x_{n+1}-\alpha &= x_n-\alpha-\frac{(x_n-\alpha) f^{(k)}(x_n)+O((x_n-\alpha)^2)}{f^{(k)}(x_n)+O(x_n-\alpha)} \\
                   &= \frac{(x_n-\alpha)O(x_n-\alpha)-O((x_n-\alpha)^2)}{f^{(k)}(x_n)+O(x_n-\alpha)} \\
                   &= O((x_n-\alpha)^2) \\
  \end{aligned}
\end{equation*}

Thus, quadratic convergence is proven.

\end{document}