\documentclass[a4paper]{article}
\usepackage[affil-it]{authblk}
\usepackage[backend=bibtex,style=numeric]{biblatex}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{geometry}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

\addbibresource{citation.bib}

\begin{document}
% =================================================
\title{Numerical Analysis homework 1}

\author{YuXiangJin 3220103054
  \thanks{Electronic address: \texttt{2621201771@qq.com}}}
\affil{XinJi 2201, Zhejiang University }


\date{Due time: \today}

\maketitle


% ============================================
\section*{I. Consider the bisection method starting with the initial
interval [1.5, 3.5] }

\subsection*{I-a What is the width of the interval at the nth step?}

\[ h_0 = 3.5 - 1.5 = 2.0 \]
\[ h_n = \frac{h_{n-1}}{2} = \cdots = \frac{h_0}{2^n} = \frac{1}{2^{n-1}} \]

\subsection*{I-b What is the supremum of the distance between
the root r and the midpoint of the interval?
}

\[ d_n = \frac{h_n}{2} = \frac{b_0-a_0}{2^{n+1}} \]

\section*{II. Prove that this goal of accuracy is guaranteed by the following choice
of the number of steps}

We want to prove:
\[
n \geqslant \frac{\log(b_0 - a_0) - \log \epsilon - \log a_0}{\log 2} - 1. 
\]
\textbf{Proof:}
We need to show:
\[
\frac{d_n}{a_0} \leqslant \epsilon
\]
where \(d_n\) is the distance between the root \(r\) and the midpoint of the interval at the \(n\)th step; since \(0 < a_0 \leqslant r\), the relative error is at most \(d_n / a_0\). From Problem I-b, this distance satisfies:
\[
d_n = \frac{b_0 - a_0}{2^{n+1}}
\]
Thus:
\[ 
\frac{d_n}{a_0} = \frac{\frac{b_0 - a_0}{2^{n+1}}}{a_0} = \frac{b_0 - a_0}{2^{n+1} \cdot a_0}
\]
We need:
\[
\frac{b_0 - a_0}{2^{n+1} \cdot a_0} \leqslant \epsilon
\]
Rearranging this inequality (every step below is an equivalence, so the stated choice of $n$ indeed guarantees the required accuracy):
\[
\frac{b_0 - a_0}{2^{n+1} \cdot a_0} \leqslant \epsilon \implies b_0 - a_0 \leqslant \epsilon \cdot 2^{n+1} \cdot a_0
\]
\[
\log(b_0 - a_0) \leqslant \log(\epsilon \cdot 2^{n+1} \cdot a_0)
\]
\[
\log(b_0 - a_0) \leqslant \log \epsilon + \log 2^{n+1} + \log a_0
\]
\[
\log(b_0 - a_0) \leqslant \log \epsilon + (n+1) \log 2 + \log a_0
\]
\[
\log(b_0 - a_0) - \log \epsilon - \log a_0 \leqslant (n+1) \log 2
\]
\[
n \geqslant \frac{\log(b_0 - a_0) - \log \epsilon - \log a_0}{\log 2} - 1
\]

\section*{III. Perform four iterations of Newton's method for the
polynomial equation $ p(x) = 4x^3 - 2x^2 + 3 = 0 $ with
the starting point $ x_0 = -1 $. }

\begin{table}[h!]
  \centering
  \begin{tabular}{|c|c|c|}
    \hline
    \textbf{Iteration}& \( x_n \) & \( p(x_n) \) \\
    \hline
    0 & -1.0000 & -3.0000 \\
    \hline
    1 & -0.8125 & -0.4658 \\
    \hline
    2 & -0.7708 & -0.0201 \\
    \hline
    3 & -0.7688 & -4.3708e-05 \\
    \hline
    4 & -0.7688 & -2.0741e-10 \\
    \hline
  \end{tabular}
\end{table}

\section*{IV. Consider a variation of Newton's method in which only
the derivative at $ x_0 $ is used.}

We have:
\[
x_{n+1} = x_n - \frac{f(x_n)}{f'(x_0)}.
\]
We want to find $C$ and $s$ such that, asymptotically,
\[
e_{n+1} \approx Ce_n^s.
\]
\textbf{Proof:}
\[
\because x_{n+1} = x_n - \frac{f(x_n)}{f'(x_0)},
\]
\[
f(\alpha) = f(x_n) + (\alpha-x_n)f'(x_n)+\frac{f^{\prime\prime}(\xi)}{2}(\alpha-x_n)^2 = 0,
\]
\[
\alpha - x_{n+1} = \alpha -x_n + \frac{f(x_n)}{f'(x_0)}
\]
\[
\therefore \alpha - x_{n+1} = \alpha -x_n -(\alpha -x_n) \cdot \frac{f'(x_n)}{f'(x_0)} - \frac{f^{\prime\prime}(\xi)}{2f'(x_0)} \cdot (\alpha-x_n)^2
\]
\[
\therefore e_{n+1} = (1-\frac{f'(x_n)}{f'(x_0)})e_n - \frac{f^{\prime\prime}(\xi)}{2f'(x_0)} e_n^2
\]
\[
\therefore e_{n+1} \approx (1-\frac{f'(x_n)}{f'(x_0)})e_n
\]
Thus:
\[
s = 1, C =  1-\frac{f'(\alpha)}{f'(x_0)}
\]

\section*{V. Within $(-\frac{\pi}{2},\frac{\pi}{2})$, will the iteration $x_{n+1} = \arctan x_n $
converge?}

We want to prove it converges. 

\begin{enumerate}
  \item If $x_0 > 0$, then we have:
  \[ 
  0<x_{n+1}<x_n<x_{n-1}<\dots<x_1<x_0.
  \]
  Since $0 < \arctan x < x$ for every $x > 0$, the sequence $\{x_n\}$ is bounded below (by $0$) and decreasing, so it has a limit. Let's suppose this limit is $a$. Then, by continuity of $\arctan$, substituting into the iteration gives us:
  \[ 
  a = \arctan a \Rightarrow a = 0.
  \]

  \item If $x_0 < 0$, a similar argument shows that:
  \[
  x_{n+1} > x_n > x_{n-1} > \dots > x_1 > x_0,
  \]
  indicating that $\{x_n\}$ is bounded and increasing, again converging to a limit. Substituting this limit into the iteration also leads us to:
  \[
  a = \arctan a \Rightarrow a = 0.
  \]
  \item If $x_0 = 0$, we have:
  \[
  x_{n+1} = x_{n} = \dots = x_2 = x_1 = \arctan(0) = 0,
  \]
  so the sequence trivially converges to 0.
\end{enumerate}
Therefore, in all cases, the iteration converges to 0.

\section*{VI. Let $p > 1$. What is the value of the following continued fraction?}

We want to prove the sequence $x_n = \frac{1}{p + \frac{1}{p + \frac{1}{p + \dots}}}$ converges.
\newline
\textbf{Proof:}
We notice that:
\[
x_{n+1} = \frac{1}{x_n + p}, x_1 = \frac{1}{p}.
\]
then we find:
\[
x_{2n+1} > x^*, n = 0,1,2, \dots
\]
\[
x_{2n} < x^*, n = 1,2,3, \dots
\]
where $x^*$ is the fixed point of the iteration:
\[
x^* = \frac{1}{x^* + p} \Rightarrow x^* = \frac{-p + \sqrt{p^2 + 4} }{2}
\]
and we know:
\[
x_1 = \frac{1}{p}, x_2 = \frac{1}{p+x_1}, x_3 = \frac{1}{p+x_2} < x_1,
\]
\[
x_4 = \frac{1}{p+x_3} > \frac{1}{p+x_1} = x_2, x_5 = \frac{1}{p+x_4} < \frac{1}{p+x_2} = x_3,
\]
Thus:
\[
0< x_{2n+1} < x_{2n-1} < \dots < x_3 < x_1
\]
\[
1 > \frac{1}{p} > x_{2n} > x_{2n-2} > \dots > x_4 > x_2
\]
Thus, the sequence $\{x_{2n-1}\}$ is bounded and decreasing, while $\{x_{2n}\}$ is bounded and increasing, so both of them have limits. 
\newline
Let's suppose the limit of $\{x_{2n-1}\}$ is $a$ , and the limit of $\{x_{2n}\}$ is $b$, substituting into the iteration gives us:
\[
b = \frac{1}{a+p}, a = \frac{1}{b+p} \Rightarrow a = b = \frac{-p + \sqrt{p^2 + 4} }{2}.
\]
Therefore, the sequence $x_n = \frac{1}{p + \frac{1}{p + \frac{1}{p + \dots}}}$ converges to $\frac{-p + \sqrt{p^2 + 4} }{2}$.

\section*{VII. What happens in problem II if $a_0 < 0 < b_0$ ? }
We already know that:
\[
d_n = \frac{h_n}{2} = \frac{b_0-a_0}{2^{n+1}}
\]
If $a_0 < 0 < b_0$, the bound in Problem II breaks down: $\log a_0$ is undefined for $a_0 \leqslant 0$, and since the root may lie arbitrarily close to zero, the relative error can become arbitrarily large. We therefore control the absolute error instead.
\newline
When we want to ensure $d_n\leqslant \epsilon$, we have:
\[
\frac{b_0-a_0}{2^{n+1}} \leqslant \epsilon,
\]
\[
\log(b_0 - a_0) - (n+1)\log 2 \leqslant \log \epsilon
\]
\[
\therefore n \geqslant \frac{\log(b_0 - a_0) - \log \epsilon }{\log 2} - 1
\]

\section*{VIII. Consider solving $f(x) = 0 \ (f \in C^{k+1}) $ by Newton's method with the starting point $x_0$ close to a root of multiplicity $k$. }
\subsection*{VIII-a How can a multiple zero be detected by examining the behavior of the points $(x_n, f(x_n))$?
}
The standard Newton's iteration is $x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}$. 
\newline
As $x_n$ approaches a zero of multiplicity $k > 1$, convergence degrades from quadratic to linear: the error is reduced by roughly the constant factor $1 - \frac{1}{k}$ at each step. One therefore observes that $f(x_n)$ stays very close to zero over many iterations while the successive iterates $x_n$ remain clustered together instead of converging rapidly.

\subsection*{VIII-b Prove that if $r$ is a zero of multiplicity $k$ of the function $f$, then quadratic convergence in Newton's iteration will be restored by making this modification: \[x_{n+1} = x_n - k\frac{f(x_n)}{f'(x_n)}. \]}
When $r$ is a zero of multiplicity $k$ of the function $f$, then we have:
\[
f(x) = (x-r)^k \cdot g(x), \ f'(x) = k(x-r)^{k-1} \cdot g(x) + (x-r)^k \cdot g'(x) .
\]
Particularly, for $x_n$ sufficiently close to $r$ :
\[
f(x_n) = (x_n-r)^k \cdot g(x_n), \ f'(x_n) = k(x_n-r)^{k-1}\cdot g(x_n) + O((x_n-r)^k) .
\]
Thus:
\begin{align*}
  x_{n+1} - r &= x_n - r -k \cdot \frac{f(x_n)}{f'(x_n)} \\
  &= x_n - r - \frac{k(x_n-r)^k \cdot g(x_n)}{k(x_n-r)^{k-1} \cdot g(x_n) + (x_n-r)^k \cdot g'(x_n)} \\
  &= (x_n-r)\left( 1 - \frac{k\, g(x_n)}{k\, g(x_n) + (x_n-r)\, g'(x_n)} \right) \\
  &= (x_n-r)^2 \cdot \frac{g'(x_n)}{k\, g(x_n) + (x_n-r)\, g'(x_n)} .
\end{align*}
Since $g(r) \neq 0$, the last fraction tends to the finite constant $\frac{g'(r)}{k\, g(r)}$ as $x_n \to r$, so $x_{n+1} - r = O\bigl((x_n-r)^2\bigr)$. Therefore quadratic convergence is restored.


% ===============================================

\end{document}