\documentclass[a4paper]{article}
\usepackage[affil-it]{authblk}
\usepackage[backend=bibtex,style=numeric]{biblatex}

\usepackage{geometry}
\usepackage{amsmath}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

\addbibresource{citation.bib}

\begin{document}
% =================================================
\title{Numerical Analysis homework \# 1}

\author{wangjie 3220100105
  \thanks{Electronic address: \texttt{2645443470@qq.com}}}
\affil{(math), Zhejiang University }


\date{Due time: \today}

\maketitle

\begin{abstract}
    Theoretical homework.
\end{abstract}





% ============================================
\section*{theoretical homework}

Complete question 1.8.1 I--VIII (VIII is optional),
Theoretical Questions, on page 7 of the lecture notes in English~\cite{wangheyu2024}.

\section*{Question 1.8.1 I}  
  
Consider the bisection method starting with the initial interval $[1.5, 3.5]$. In the following questions, ``the interval'' refers to the bisection interval whose width changes across different loops.
  
\begin{itemize}  
    \item What is the width of the interval at the $n$th step?  
    \item What is the supremum of the distance between the root $r$ and the midpoint of the interval?  
\end{itemize}  
  
\textbf{Answer:}  
\begin{itemize}  
    \item The width of the interval at the $n$th step can be calculated using the following formula:  
    \[ \text{width}_n = \frac{\text{initial width}}{2^n} \]  
    so
    \[ \text{width}_n = \frac{3.5-1.5}{2^n} = \frac{1}{2^{n-1}}. \]
    \item Since the root $r$ always lies within the current interval, its distance to the midpoint is at most half the interval width. Hence the supremum of the distance at the $n$th step is
    \[ \frac{\text{width}_n}{2} = \frac{1}{2^n}. \]
\end{itemize}  
  
\section*{Question 1.8.1 II}  
  
In using the bisection algorithm with its initial interval as $[a_0, b_0]$ with $a_0 > 0$, we want to determine the root with its relative error no greater than $\varepsilon$. Prove that this goal of accuracy is guaranteed by the following choice of the number of steps,  
\[ n \geq \frac{\log(b_0 - a_0) - \log \varepsilon - \log a_0 }{\log 2}  - 1 \]  
  
\textbf{Answer:}
The stated condition on $n$ is equivalent to
  
\[ \varepsilon \geq \frac{b_0 - a_0}{2^{n+1} \cdot a_0} \]  
  
Let $\alpha$ be the true root and $c$ be the computed midpoint at the $n$-th step; after $n$ bisection steps, $\lvert \alpha - c \rvert \leq \frac{b_0 - a_0}{2^{n+1}}$.
  
Obviously, $\alpha \geq a_0$, so    
  
\[ \text{relative error} =  \frac{\lvert \alpha - c \rvert}{\alpha} \leq \frac{b_0 - a_0}{2^{n+1} \cdot a_0} \leq \varepsilon\]   
  
\textbf{Proof complete}  

\subsection*{Question 1.8.1 III}

Perform four iterations of Newton's method for the  
polynomial equation $p(x) = 4x^3 - 2x^2 + 3 = 0$ with  
the starting point $x_0 = -1$. Use a hand calculator and  
organize results of the iterations in a table.  

\textbf{Answer:}
\[ p(x) = 4x^3 - 2x^2 + 3, \qquad p'(x) = 12x^2 - 4x. \]
The starting point is $x_0 = -1$ and the iteration is $x_{n+1} = x_n - \frac{p(x_n)}{p'(x_n)}$.
\begin{itemize}  
    \item Iterations for Newton's method applied to the given polynomial equation $p(x) = 4x^3 - 2x^2 + 3 = 0$ with the starting point $x_0 = -1$:  
    \begin{center}  
    \begin{tabular}{|c|c|}  
    \hline  
    Iteration & $x_n$ Value \\  
    \hline  
    0 & -1.000000 \\  
    1 & -0.812500 \\  
    2 & -0.770804 \\  
    3 & -0.768832 \\  
    4 & -0.768828 \\
    \hline  
    \end{tabular}  
    \end{center}  
    \item The iterations are as follows:  
    \begin{itemize}  
        \item $x_1 = x_0 - \frac{p(x_0)}{p'(x_0)} = -0.812500 $  
        \item $x_2 = x_1 - \frac{p(x_1)}{p'(x_1)} \approx -0.770804 $  
        \item $x_3 = x_2 - \frac{p(x_2)}{p'(x_2)} \approx -0.768832 $  
        \item $x_4 = x_3 - \frac{p(x_3)}{p'(x_3)} \approx -0.768828 $
    \end{itemize}  
\end{itemize}

\subsection*{Question 1.8.1 IV}

Consider a variation of Newton's method in which only the derivative at $x_0$ is used,  
$x_{n+1} = x_n - \frac{f(x_n)}{f'(x_0)}$.  
Find $C$ and $s$ such that
$e_{n+1} = Ce_n^s$,
where $e_n$ is the error of Newton's method at step $n$, $s$ is a constant, and $C$ may depend on $x_n$, the true solution $\alpha$, and the derivative of the function $f$.

\textbf{Answer:}
By Taylor expansion, $f(\alpha) = f(x_n) + (\alpha - x_n) f'(x_n) + O((\alpha - x_n)^2) = 0$, so to first order $f(x_n) \approx (x_n - \alpha) f'(x_n)$. Substituting this into $x_{n+1} = x_n - \frac{f(x_n)}{f'(x_0)}$ gives, up to higher-order terms,
\[
x_{n+1} - \alpha = (x_n - \alpha) \left(1 - \frac{f'(x_n)}{f'(x_0)}\right).
\]
  
And
\[
e_{n+1} = \lvert x_{n+1} - \alpha \rvert \quad \text{and} \quad e_n = \lvert x_n - \alpha \rvert.
\]
  
Therefore, for $e_{n+1} = Ce_n^s$, the result shows $C = \left\lvert 1 - \frac{f'(x_n)}{f'(x_0)} \right\rvert$ and $s = 1$.

\subsection*{Question 1.8.1 V}

Within $(-\frac{\pi}{2}, \frac{\pi}{2})$, will the iteration $x_{n+1} = \tan^{-1} x_n$ converge?

\textbf{Answer:}
For \( x_{n+1} = \tan^{-1}(x_n) \), we define:
\[
g(x) = \arctan x.
\]
Calculating the derivative, we find
\[
|g'(x)| = \frac{1}{1+x^2} \leq 1,
\]
with equality only at $x = 0$. By the mean value theorem, for $x_n \neq 0$ there exists $\xi$ strictly between $0$ and $x_n$ such that $|x_{n+1}| = |g(x_n) - g(0)| = |g'(\xi)| \, |x_n| < |x_n|$. Hence $(|x_n|)$ is strictly decreasing and bounded below by $0$, so it converges; the limit must satisfy $\arctan x = x$, whose only solution is $x = 0$. Therefore the iteration converges to the unique fixed point $x = 0$.

\textbf{Proof complete}

\subsection*{Question 1.8.1 VI}

Let \( p > 1 \). What is the value of the following continued fraction?
\[
x = \frac{1}{p + \frac{1}{p + \frac{1}{p + \cdots}}}
\]
Prove that the sequence of values converges. (Hint: this can be interpreted as \( x = \lim_{n \to \infty} x_n \), where
\[
x_1 = \frac{1}{p}, \quad x_2 = \frac{1}{p + \frac{1}{p}}, \quad x_3 = \frac{1}{p + \frac{1}{p + \frac{1}{p}}}, \quad \ldots
\]
and so forth. Formulate \( x \) as a fixed point of some function.) 

\textbf{Answer:}

From the definitions:
\[
x_1 = \frac{1}{p}, \quad x_2 = \frac{1}{p + \frac{1}{p}}, \quad x_3 = \frac{1}{p + \frac{1}{p + \frac{1}{p}}}, \quad \ldots
\]
we set \( g(x) = \frac{1}{p+x} \), hence \( x_{n+1} = g(x_n) \).

Since every iterate satisfies $x_n > 0$ and $p > 1$, we find
\[
|g'(x)| = \frac{1}{(x+p)^2} \leq \frac{1}{p^2} < 1 \quad \text{for } x \geq 0,
\]
so $g$ is a contraction on $[0, \infty)$ and the sequence converges.

Setting \( x = \lim_{n \to \infty} x_n \), we have \( g(x) = x \):
\[
\frac{1}{p+x} = x.
\]
Multiplying through by \( p+x \) gives us:
\[
1 = px + x^2.
\]
Rearranging leads to the quadratic equation:
\[
x^2 + px - 1 = 0.
\]

Using the quadratic formula with $a = 1$, $b = p$, $c = -1$:
\[
x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a} = \frac{-p \pm \sqrt{p^2 + 4}}{2}.
\]

This results in two potential solutions (written $x_\pm$ to avoid confusion with the sequence terms $x_1, x_2, \ldots$):
\[
x_+ = \frac{-p + \sqrt{p^2 + 4}}{2}, \quad x_- = \frac{-p - \sqrt{p^2 + 4}}{2}.
\]

Since \( p > 1 \), we find that
\[
\sqrt{p^2 + 4} > p,
\]
ensuring that \( x_+ \) is positive:
\[
x_+ = \frac{-p + \sqrt{p^2 + 4}}{2} > 0.
\]

In contrast, \( x_- \) is negative. Every term of the sequence is positive, so we discard \( x_- \) as it is not meaningful in this context. Therefore, we conclude that
\[
x = \frac{-p + \sqrt{p^2 + 4}}{2}.
\]
\subsection*{Question 1.8.1 VII}

What happens in problem II if \( a_0 < 0 < b_0 \)? Derive an inequality for the number of steps similar to that in II. In this case, is the relative error still an appropriate measure? 

\textbf{Answer:}

Let \( \alpha \) be the truth value and \( c \) be the measurement value at the \( n \)-th step.

For the relative error:

After \( n \) steps, we have:
\[
\text{if satisfies } n \geq \frac{\log(b_0 - a_0) - \log \varepsilon - \log |\alpha|}{\log 2} - 1,
\]
which equals 
\[
\frac{b_0 - a_0}{2^{n+1} |\alpha|} \leq \varepsilon.
\]

The relative error is given by:
\[
\text{relative error} = \frac{|\alpha - c|}{|\alpha|} \leq \frac{b_0 - a_0}{2^{n+1} |\alpha|} \leq \varepsilon.
\]

However, when \( a_0 < 0 < b_0 \) the root \( \alpha \) in the denominator can be arbitrarily close to \( 0 \), in which case the relative error is not a reasonable measure.

Therefore, for the case where \( a_0 < 0 < b_0 \), a better-behaved measure is:
\[
\frac{|\alpha - c|}{\max(|a_0|, |b_0|)}.
\]

Then, if $n$ satisfies
\[
n \geq \frac{\log(b_0 - a_0) - \log \varepsilon - \log \max(|a_0|, |b_0|)}{\log 2} - 1,
\]
it follows equivalently that
\[
\frac{b_0 - a_0}{2^{n+1} \max(|a_0|, |b_0|)} \leq \varepsilon.
\]

The new measure of error is:
\[
\frac{|\alpha - c|}{\max(|a_0|, |b_0|)} \leq \frac{b_0 - a_0}{2^{n+1} \max(|a_0|, |b_0|)} \leq \varepsilon.
\]



% ===============================================
\section*{Acknowledgement}
None.


\printbibliography

\end{document}